diff --git a/backend/src/main/java/com/storycove/config/DatabaseMigrationRunner.java b/backend/src/main/java/com/storycove/config/DatabaseMigrationRunner.java
new file mode 100644
index 0000000..9e2839e
--- /dev/null
+++ b/backend/src/main/java/com/storycove/config/DatabaseMigrationRunner.java
@@ -0,0 +1,118 @@
+package com.storycove.config;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.boot.CommandLineRunner;
+import org.springframework.core.annotation.Order;
+import org.springframework.stereotype.Component;
+
+import javax.sql.DataSource;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.Statement;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Runs database migrations on application startup.
+ * This ensures all library databases have the required schema,
+ * particularly for tables like backup_jobs that were added after initial deployment.
+ */
+@Component
+@Order(1) // Run early in the startup sequence
+public class DatabaseMigrationRunner implements CommandLineRunner {
+
+    private static final Logger logger = LoggerFactory.getLogger(DatabaseMigrationRunner.class);
+
+    @Autowired
+    private DataSource dataSource;
+
+    @Value("${spring.datasource.username}")
+    private String dbUsername;
+
+    @Value("${spring.datasource.password}")
+    private String dbPassword;
+
+    // All library databases that need migrations
+    private static final List<String> LIBRARY_DATABASES = Arrays.asList(
+            "storycove", // default database
+            "storycove_afterdark",
+            "storycove_clas",
+            "storycove_secret"
+    );
+
+    // SQL for the backup_jobs table migration (idempotent)
+    private static final String BACKUP_JOBS_MIGRATION = """
+            CREATE TABLE IF NOT EXISTS backup_jobs (
+                id UUID PRIMARY KEY,
+                library_id VARCHAR(255) NOT NULL,
+                type VARCHAR(50) NOT NULL CHECK (type IN ('DATABASE_ONLY', 'COMPLETE')),
+                status VARCHAR(50) NOT NULL CHECK (status IN ('PENDING', 'IN_PROGRESS', 'COMPLETED', 'FAILED', 'EXPIRED')),
+                file_path VARCHAR(1000),
+                file_size_bytes BIGINT,
+                progress_percent INTEGER,
+                error_message VARCHAR(1000),
+                created_at TIMESTAMP NOT NULL,
+                started_at TIMESTAMP,
+                completed_at TIMESTAMP,
+                expires_at TIMESTAMP
+            );
+
+            CREATE INDEX IF NOT EXISTS idx_backup_jobs_library_id ON backup_jobs(library_id);
+            CREATE INDEX IF NOT EXISTS idx_backup_jobs_status ON backup_jobs(status);
+            CREATE INDEX IF NOT EXISTS idx_backup_jobs_expires_at ON backup_jobs(expires_at);
+            CREATE INDEX IF NOT EXISTS idx_backup_jobs_created_at ON backup_jobs(created_at DESC);
+            """;
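+
+    // Both CREATE TABLE IF NOT EXISTS and CREATE INDEX IF NOT EXISTS are no-ops
+    // when the objects already exist (CREATE INDEX IF NOT EXISTS requires
+    // PostgreSQL 9.5 or newer), so this script is safe to run on every startup.
+    // The PostgreSQL JDBC driver also accepts several semicolon-separated
+    // statements in a single Statement.execute() call, which applyMigrations()
+    // below relies on.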
+
+    @Override
+    public void run(String... args) throws Exception {
+        logger.info("🗄️ Starting database migrations...");
+
+        for (String database : LIBRARY_DATABASES) {
+            try {
+                applyMigrations(database);
+                logger.info("✅ Successfully applied migrations to database: {}", database);
+            } catch (Exception e) {
+                // Log the error, but don't fail startup if the database doesn't exist yet
+                if (e.getMessage() != null && e.getMessage().contains("does not exist")) {
+                    logger.warn("⚠️ Database {} does not exist yet, skipping migrations", database);
+                } else {
+                    logger.error("❌ Failed to apply migrations to database: {}", database, e);
+                    // Don't throw - allow the application to start even if some migrations fail
+                }
+            }
+        }
+
+        logger.info("✅ Database migrations completed");
+    }
+
+    private void applyMigrations(String database) throws Exception {
+        // Connect directly to each database rather than through SmartRoutingDataSource:
+        // take the default datasource URL and swap in the target database name, e.g.
+        // jdbc:postgresql://postgres:5432/storycove -> jdbc:postgresql://postgres:5432/storycove_afterdark
+        String originalUrl;
+        try (Connection metadataConn = dataSource.getConnection()) {
+            originalUrl = metadataConn.getMetaData().getURL();
+        }
+        String baseUrl = originalUrl.substring(0, originalUrl.lastIndexOf('/'));
+        String targetUrl = baseUrl + "/" + database;
+
+        // Connect directly to the target database using credentials from application properties
+        try (Connection conn = DriverManager.getConnection(targetUrl, dbUsername, dbPassword)) {
+            // Apply the backup_jobs migration
+            try (Statement stmt = conn.createStatement()) {
+                stmt.execute(BACKUP_JOBS_MIGRATION);
+            }
+
+            logger.debug("Applied backup_jobs migration to {}", database);
+        }
+    }
+}
diff --git a/docker-compose.yml b/docker-compose.yml
index 00c374e..24d5981 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -46,7 +46,7 @@ services:
       - library_config:/app/config
     depends_on:
       postgres:
-        condition: service_started
+        condition: service_healthy
       solr:
         condition: service_started
     networks:
@@ -71,35 +71,6 @@ services:
       timeout: 5s
       retries: 5
 
-  migrations:
-    image: postgres:15-alpine
-    depends_on:
-      postgres:
-        condition: service_healthy
-    volumes:
-      - ./backend/create_backup_jobs_table.sql:/migrations/create_backup_jobs_table.sql:ro
-    networks:
-      - storycove-network
-    environment:
-      - PGPASSWORD=${DB_PASSWORD}
-    entrypoint: /bin/sh
-    command: >
-      -c "
-      echo '🗄️ Applying database migrations...';
-      for DB in storycove storycove_afterdark storycove_clas storycove_secret; do
-        if psql -h postgres -U storycove -lqt | cut -d '|' -f 1 | grep -qw \$$DB; then
-          echo \"  ✓ Applying migration to \$$DB...\";
-          psql -h postgres -U storycove -d \$$DB -f /migrations/create_backup_jobs_table.sql > /dev/null 2>&1 || true;
-        fi;
-      done;
-      echo '✅ Migrations complete!';
-      sleep infinity;
-      "
-    deploy:
-      restart_policy:
-        condition: on-failure
-        max_attempts: 1
-
   solr:
     build: