diff --git a/build.gradle b/build.gradle index 3b798af..e2017a6 100644 --- a/build.gradle +++ b/build.gradle @@ -81,7 +81,7 @@ tasks.named('test') { // Code coverage jacoco { - toolVersion = "0.8.8" + toolVersion = "0.8.11" } jacocoTestReport { diff --git a/src/main/java/com/studysync/config/DatabaseReloadService.java b/src/main/java/com/studysync/config/DatabaseReloadService.java index 028ddfc..8eb96c4 100644 --- a/src/main/java/com/studysync/config/DatabaseReloadService.java +++ b/src/main/java/com/studysync/config/DatabaseReloadService.java @@ -14,6 +14,7 @@ import javax.sql.DataSource; import java.nio.file.Files; import java.nio.file.Path; +import java.sql.Connection; /** * Service that shuts down and reopens the H2 database in-place, allowing the @@ -22,9 +23,9 @@ *

  * <p>The reload cycle is:
  * <ol>
  *   <li>{@code SHUTDOWN} — H2 closes its engine (caches flushed, file lock released).</li>
- *   <li>Soft-evict all pooled connections so HikariCP discards them.</li>
- *   <li>A test query forces HikariCP to create a fresh connection, which makes
- *       H2 open the (now-replaced) database file.</li>
+ *   <li>Evict all pooled connections and wait for active connections to drain.</li>
+ *   <li>A test query (with retries) forces HikariCP to create a fresh connection,
+ *       which makes H2 open the (now-replaced) database file.</li>
  *   <li>Schema migrations ({@code schema.sql}) are re-applied to ensure the
  *       downloaded database has all required columns/indexes.</li>
  * </ol>
@@ -34,6 +35,15 @@ public class DatabaseReloadService { private static final Logger logger = LoggerFactory.getLogger(DatabaseReloadService.class); + /** Maximum time (ms) to wait for active connections to drain after SHUTDOWN. */ + private static final long DRAIN_TIMEOUT_MS = 3000; + /** Interval (ms) between drain-wait polls. */ + private static final long DRAIN_POLL_MS = 100; + /** Number of reconnect attempts before giving up. */ + private static final int RECONNECT_RETRIES = 5; + /** Delay (ms) between reconnect retries. */ + private static final long RECONNECT_RETRY_DELAY_MS = 500; + private final DataSource dataSource; private final JdbcTemplate jdbcTemplate; @@ -45,6 +55,7 @@ public DatabaseReloadService(DataSource dataSource, JdbcTemplate jdbcTemplate) { /** * Shuts down the H2 engine and evicts all pooled connections, releasing the * file lock so the {@code .mv.db} file can be safely replaced on any OS. + * Blocks until all active connections have drained (up to a timeout). * Must be followed by a call to {@link #reconnect()} once the file is ready. */ public void shutdown() { @@ -58,28 +69,97 @@ public void shutdown() { logger.debug("H2 SHUTDOWN completed (exception expected): {}", e.getMessage()); } - // 2. Tell HikariCP to discard every idle/returned connection + // 2. Evict connections and wait for active ones to drain + int remaining = -1; if (dataSource instanceof HikariDataSource hikari) { HikariPoolMXBean pool = hikari.getHikariPoolMXBean(); if (pool != null) { pool.softEvictConnections(); + + // Wait for all active (checked-out) connections to be returned + // and evicted, so H2 fully releases the file lock. 
+ long deadline = System.currentTimeMillis() + DRAIN_TIMEOUT_MS; + while (pool.getActiveConnections() > 0 + && System.currentTimeMillis() < deadline) { + try { + Thread.sleep(DRAIN_POLL_MS); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + break; + } + } + remaining = pool.getActiveConnections(); } } + + if (remaining > 0) { + logger.warn("H2 shutdown completed with {} active connection(s) still present; " + + "file lock may not be fully released", remaining); + } else if (remaining == 0) { + logger.info("H2 shutdown complete; all connections drained"); + } else { + logger.info("H2 shutdown complete"); + } } /** * Reconnects to the H2 database file (which may have been replaced since * {@link #shutdown()}) and re-applies schema migrations. + * + *

+     * <p>Uses retries because HikariCP may still hand out stale connections on
+     * the first attempt if soft-eviction hasn't fully propagated.
+ Integer result = jdbcTemplate.queryForObject("SELECT 1", Integer.class); + if (result == null || result != 1) { + throw new RuntimeException("SELECT 1 returned unexpected result: " + result); + } + logger.info("Database connection verified (attempt {})", attempt); + break; + } catch (Exception e) { + lastException = e; + logger.debug("Reconnect attempt {}/{} failed: {}", attempt, + RECONNECT_RETRIES, e.getMessage()); + if (attempt == RECONNECT_RETRIES) { + String msg = "Database reconnect failed after " + RECONNECT_RETRIES + + " attempts — application may need a restart"; + logger.error(msg, e); + throw new RuntimeException(msg, e); + } + try { + Thread.sleep(RECONNECT_RETRY_DELAY_MS); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + RuntimeException interrupted = new RuntimeException( + "Database reconnect interrupted while waiting to retry", ie); + if (lastException != null && lastException != ie) { + interrupted.addSuppressed(lastException); + } + throw interrupted; + } + // Re-evict to clear any remaining stale connections + if (dataSource instanceof HikariDataSource hikari) { + HikariPoolMXBean pool = hikari.getHikariPoolMXBean(); + if (pool != null) { + pool.softEvictConnections(); + } + } + } } // Run idempotent schema.sql to apply any missing migrations @@ -109,31 +189,39 @@ private void runMigrations() { populator.setContinueOnError(true); // individual failures are non-fatal populator.setSeparator(";"); populator.execute(dataSource); - logger.info("Schema migrations re-applied after database reload using {} (continueOnError=true; individual statement failures were silently ignored)", + logger.info("Schema migrations re-applied after database reload using {} " + + "(continueOnError=true; individual statement failures were silently ignored)", schemaResource.getDescription()); } catch (Exception e) { - logger.error("Failed to re-apply schema.sql after reload — the database may be missing columns/tables", e); + 
logger.error("Failed to re-apply schema.sql after reload" + + " — the database may be missing columns/tables", e); } } private Resource resolveSchemaResource() { - Resource classpathSchema = new ClassPathResource("schema.sql", DatabaseReloadService.class.getClassLoader()); + Resource classpathSchema = new ClassPathResource("schema.sql", + DatabaseReloadService.class.getClassLoader()); if (classpathSchema.exists()) { return classpathSchema; } - Path installedSchema = Path.of(System.getProperty("user.home"), ".local", "share", "studysync", "resources", "schema.sql"); + Path installedSchema = Path.of(System.getProperty("user.home"), + ".local", "share", "studysync", "resources", "schema.sql"); if (Files.exists(installedSchema)) { - logger.warn("schema.sql not found on classpath; falling back to installed resource file: {}", installedSchema); + logger.warn("schema.sql not found on classpath; " + + "falling back to installed resource file: {}", installedSchema); return new FileSystemResource(installedSchema); } - Path projectSchema = Path.of("src", "main", "resources", "schema.sql").toAbsolutePath(); + Path projectSchema = Path.of("src", "main", "resources", "schema.sql") + .toAbsolutePath(); if (Files.exists(projectSchema)) { - logger.warn("schema.sql not found on classpath; falling back to project resource file: {}", projectSchema); + logger.warn("schema.sql not found on classpath; " + + "falling back to project resource file: {}", projectSchema); return new FileSystemResource(projectSchema); } - throw new IllegalStateException("schema.sql not found in classpath, installed resources, or project resources"); + throw new IllegalStateException( + "schema.sql not found in classpath, installed resources, or project resources"); } } diff --git a/src/main/java/com/studysync/domain/service/StudyService.java b/src/main/java/com/studysync/domain/service/StudyService.java index 0769912..0526409 100644 --- a/src/main/java/com/studysync/domain/service/StudyService.java +++ 
b/src/main/java/com/studysync/domain/service/StudyService.java @@ -45,6 +45,19 @@ public StudyService(GoogleDriveService googleDriveService, DateTimeService dateT this.dateTimeService = dateTimeService; } + /** + * Clears cached processing guards so that delayed-goal processing + * re-runs against the newly loaded database. + * Must be called after a live database reload (e.g. Google Drive download). + */ + @Transactional(propagation = org.springframework.transaction.annotation.Propagation.NOT_SUPPORTED) + public void resetAfterReload() { + synchronized (this) { + lastDelayProcessingDate = null; + } + logger.info("StudyService caches reset after DB reload"); + } + private void markDirty() { if (TransactionSynchronizationManager.isSynchronizationActive()) { TransactionSynchronizationManager.registerSynchronization(new TransactionSynchronization() { diff --git a/src/main/java/com/studysync/domain/service/TaskService.java b/src/main/java/com/studysync/domain/service/TaskService.java index 357bdf1..0ae5d5c 100644 --- a/src/main/java/com/studysync/domain/service/TaskService.java +++ b/src/main/java/com/studysync/domain/service/TaskService.java @@ -57,6 +57,19 @@ public TaskService(CategoryService categoryService, GoogleDriveService googleDri this.dateTimeService = dateTimeService; } + /** + * Clears cached processing guards so that delay-marking and other + * once-per-day operations re-run against the newly loaded database. + * Must be called after a live database reload (e.g. Google Drive download). 
+ */ + @Transactional(propagation = org.springframework.transaction.annotation.Propagation.NOT_SUPPORTED) + public void resetAfterReload() { + synchronized (this) { + lastDelayedTasksProcessedDate = null; + } + logger.info("TaskService caches reset after DB reload"); + } + private void markDirty() { if (TransactionSynchronizationManager.isSynchronizationActive()) { TransactionSynchronizationManager.registerSynchronization(new TransactionSynchronization() { diff --git a/src/main/java/com/studysync/presentation/ui/StudySyncUI.java b/src/main/java/com/studysync/presentation/ui/StudySyncUI.java index c6befa2..6e1e45c 100644 --- a/src/main/java/com/studysync/presentation/ui/StudySyncUI.java +++ b/src/main/java/com/studysync/presentation/ui/StudySyncUI.java @@ -170,8 +170,14 @@ public void start(Stage primaryStage) { // Register pre-reload listener to show a blocking overlay during DB reload googleDriveService.addPreReloadListener(() -> Platform.runLater(this::showReloadOverlay)); - // Register reload listener to refresh all panels when DB is reloaded from Drive + // Register reload listener to reset service caches and refresh all panels + // when the DB is reloaded from Drive. googleDriveService.addReloadListener(() -> Platform.runLater(() -> { + // Clear once-per-day processing guards so delay logic re-runs + // against the freshly loaded database. 
+ taskService.resetAfterReload(); + studyService.resetAfterReload(); + hideReloadOverlay(); refreshAllPanels(); })); diff --git a/src/test/java/com/studysync/config/DatabaseReloadServiceTest.java b/src/test/java/com/studysync/config/DatabaseReloadServiceTest.java new file mode 100644 index 0000000..2679524 --- /dev/null +++ b/src/test/java/com/studysync/config/DatabaseReloadServiceTest.java @@ -0,0 +1,274 @@ +package com.studysync.config; + +import com.zaxxer.hikari.HikariConfig; +import com.zaxxer.hikari.HikariDataSource; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; +import org.springframework.jdbc.core.JdbcTemplate; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +/** + * Integration tests for {@link DatabaseReloadService}. + * + *

+ * <p>Uses a file-based H2 database (not in-memory) so we can test the full
+ * shutdown → file-replace → reconnect cycle that mirrors a Google Drive download.
Files.writeString(liveDbFile, "dummy — proves file is unlocked")); + } + + @Test + void shutdown_thenReconnect_restoresSameData() { + // Verify the original data is there + String before = jdbcTemplate.queryForObject( + "SELECT val FROM test_data WHERE id = 1", String.class); + assertEquals("original", before); + + // Shutdown and reconnect to the same (unmodified) file + reloadService.shutdown(); + reloadService.reconnect(); + + // The same data should still be accessible + String after = jdbcTemplate.queryForObject( + "SELECT val FROM test_data WHERE id = 1", String.class); + assertEquals("original", after); + } + + // ───────────────────────────────────────────── + // Full reload cycle (shutdown → replace → reconnect) + // ───────────────────────────────────────────── + + @Test + void reloadPicksUpReplacementDatabase() throws IOException { + // 1. Create a second, independent database with different data + Path replacementBase = tempDir.resolve("replacement"); + Path replacementFile = tempDir.resolve("replacement.mv.db"); + createReplacementDatabase(replacementBase, "replaced-value"); + + // 2. Shutdown the live database + reloadService.shutdown(); + + // 3. Swap the file on disk (mirrors what GoogleDriveGateway does) + Files.move(replacementFile, liveDbFile, StandardCopyOption.REPLACE_EXISTING); + + // 4. Reconnect — should now read from the replacement file + reloadService.reconnect(); + + // 5. 
Verify the new data is visible + String value = jdbcTemplate.queryForObject( + "SELECT val FROM test_data WHERE id = 1", String.class); + assertEquals("replaced-value", value, + "After reload the query should return data from the replacement DB"); + } + + @Test + void reloadPicksUpReplacementWithMoreRows() throws IOException { + // Create replacement with multiple rows + Path replacementBase = tempDir.resolve("multi"); + Path replacementFile = tempDir.resolve("multi.mv.db"); + createReplacementDatabaseWithRows(replacementBase, 5); + + reloadService.shutdown(); + Files.move(replacementFile, liveDbFile, StandardCopyOption.REPLACE_EXISTING); + reloadService.reconnect(); + + Integer count = jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM test_data", Integer.class); + assertNotNull(count); + assertEquals(5, count, + "After reload the table should have the replacement row count"); + } + + @Test + void reloadDatabaseConvenienceMethod() throws IOException { + // Prepare replacement + Path replacementBase = tempDir.resolve("conv"); + Path replacementFile = tempDir.resolve("conv.mv.db"); + createReplacementDatabase(replacementBase, "via-convenience"); + + // Replace the file BEFORE calling reloadDatabase (file already swapped) + reloadService.shutdown(); + Files.move(replacementFile, liveDbFile, StandardCopyOption.REPLACE_EXISTING); + reloadService.reconnect(); + + String value = jdbcTemplate.queryForObject( + "SELECT val FROM test_data WHERE id = 1", String.class); + assertEquals("via-convenience", value); + } + + // ───────────────────────────────────────────── + // Reconnect resilience + // ───────────────────────────────────────────── + + @Test + void reconnectSucceedsAfterShutdownWithoutFileReplacement() { + reloadService.shutdown(); + + // Reconnect to the same file (no replacement) — should succeed + assertDoesNotThrow(() -> reloadService.reconnect()); + + // Verify data is intact + Integer count = jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM test_data", 
Integer.class); + assertEquals(1, count); + } + + @Test + void multipleShutdownReconnectCyclesAreStable() throws IOException { + for (int cycle = 1; cycle <= 3; cycle++) { + Path base = tempDir.resolve("cycle" + cycle); + Path file = tempDir.resolve("cycle" + cycle + ".mv.db"); + createReplacementDatabase(base, "cycle-" + cycle); + + reloadService.shutdown(); + Files.move(file, liveDbFile, StandardCopyOption.REPLACE_EXISTING); + reloadService.reconnect(); + + String value = jdbcTemplate.queryForObject( + "SELECT val FROM test_data WHERE id = 1", String.class); + assertEquals("cycle-" + cycle, value, + "Cycle " + cycle + " should show the correct replacement data"); + } + } + + @Test + void writesAfterReloadPersistCorrectly() throws IOException { + Path base = tempDir.resolve("persist"); + Path file = tempDir.resolve("persist.mv.db"); + createReplacementDatabase(base, "base"); + + reloadService.shutdown(); + Files.move(file, liveDbFile, StandardCopyOption.REPLACE_EXISTING); + reloadService.reconnect(); + + // Write new data after reload + jdbcTemplate.update("UPDATE test_data SET val = 'modified' WHERE id = 1"); + + // Read it back — should see the write, not the old replacement value + String value = jdbcTemplate.queryForObject( + "SELECT val FROM test_data WHERE id = 1", String.class); + assertEquals("modified", value, + "Writes after reload must be visible immediately"); + } + + // ───────────────────────────────────────────── + // Helpers + // ───────────────────────────────────────────── + + /** + * Creates a standalone H2 file database with a single test_data row. 
+ */ + private void createReplacementDatabase(Path dbBase, String value) { + String url = "jdbc:h2:file:" + dbBase.toAbsolutePath(); + HikariConfig cfg = new HikariConfig(); + cfg.setJdbcUrl(url); + cfg.setUsername("sa"); + cfg.setPassword(""); + cfg.setMaximumPoolSize(1); + + try (HikariDataSource ds = new HikariDataSource(cfg)) { + JdbcTemplate tpl = new JdbcTemplate(ds); + tpl.execute("CREATE TABLE IF NOT EXISTS test_data (" + + "id INT PRIMARY KEY, val VARCHAR(100))"); + tpl.update("INSERT INTO test_data (id, val) VALUES (1, ?)", value); + tpl.execute("SHUTDOWN"); + } catch (Exception ignored) { + // SHUTDOWN kills the connection — expected + } + } + + /** + * Creates a standalone H2 file database with N rows. + */ + private void createReplacementDatabaseWithRows(Path dbBase, int rowCount) { + String url = "jdbc:h2:file:" + dbBase.toAbsolutePath(); + HikariConfig cfg = new HikariConfig(); + cfg.setJdbcUrl(url); + cfg.setUsername("sa"); + cfg.setPassword(""); + cfg.setMaximumPoolSize(1); + + try (HikariDataSource ds = new HikariDataSource(cfg)) { + JdbcTemplate tpl = new JdbcTemplate(ds); + tpl.execute("CREATE TABLE IF NOT EXISTS test_data (" + + "id INT PRIMARY KEY, val VARCHAR(100))"); + for (int i = 1; i <= rowCount; i++) { + tpl.update("INSERT INTO test_data (id, val) VALUES (?, ?)", + i, "row-" + i); + } + tpl.execute("SHUTDOWN"); + } catch (Exception ignored) { + // SHUTDOWN kills the connection — expected + } + } +}