Merge branch 'main' into statistics

apply_migration_production.sh (new executable file, 45 lines)
@@ -0,0 +1,45 @@
#!/bin/bash

# Run this script on your production server to apply the backup_jobs table migration
# to all library databases

echo "Applying backup_jobs table migration to all databases..."
echo ""

# Apply to each database
for DB in storycove storycove_afterdark storycove_clas storycove_secret; do
  echo "Applying to $DB..."
  docker-compose exec -T postgres psql -U storycove -d "$DB" <<'SQL'
CREATE TABLE IF NOT EXISTS backup_jobs (
    id UUID PRIMARY KEY,
    library_id VARCHAR(255) NOT NULL,
    type VARCHAR(50) NOT NULL CHECK (type IN ('DATABASE_ONLY', 'COMPLETE')),
    status VARCHAR(50) NOT NULL CHECK (status IN ('PENDING', 'IN_PROGRESS', 'COMPLETED', 'FAILED', 'EXPIRED')),
    file_path VARCHAR(1000),
    file_size_bytes BIGINT,
    progress_percent INTEGER,
    error_message VARCHAR(1000),
    created_at TIMESTAMP NOT NULL,
    started_at TIMESTAMP,
    completed_at TIMESTAMP,
    expires_at TIMESTAMP
);

CREATE INDEX IF NOT EXISTS idx_backup_jobs_library_id ON backup_jobs(library_id);
CREATE INDEX IF NOT EXISTS idx_backup_jobs_status ON backup_jobs(status);
CREATE INDEX IF NOT EXISTS idx_backup_jobs_expires_at ON backup_jobs(expires_at);
CREATE INDEX IF NOT EXISTS idx_backup_jobs_created_at ON backup_jobs(created_at DESC);
SQL
  echo "✓ Done with $DB"
  echo ""
done

echo "Migration complete! Verifying..."
echo ""

# Verify tables exist
for DB in storycove storycove_afterdark storycove_clas storycove_secret; do
  echo "Checking $DB:"
  docker-compose exec -T postgres psql -U storycove -d "$DB" -c "\d backup_jobs" 2>&1 | grep -E "Table|does not exist" || echo "  ✓ Table exists"
  echo ""
done
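Note: every statement in the heredoc is guarded by IF NOT EXISTS, so the script is safe to re-run, and the quoted 'SQL' delimiter keeps the shell from expanding anything inside the block. Both loops hardcode the same four library databases.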
backend/apply_backup_jobs_migration.sh (new executable file, 54 lines)
@@ -0,0 +1,54 @@
#!/bin/bash

# Script to apply backup_jobs table migration to all library databases
# This should be run from the backend directory

set -e

# Use full docker path
DOCKER="/usr/local/bin/docker"

echo "Applying backup_jobs table migration..."

# Get database connection details from environment or use defaults
DB_HOST="${POSTGRES_HOST:-postgres}"
DB_PORT="${POSTGRES_PORT:-5432}"
DB_USER="${POSTGRES_USER:-storycove}"
DB_PASSWORD="${POSTGRES_PASSWORD:-password}"

# List of databases to update
DATABASES=("storycove" "storycove_afterdark")

for DB_NAME in "${DATABASES[@]}"; do
  echo ""
  echo "Applying migration to database: $DB_NAME"

  # Check if database exists
  if $DOCKER exec storycove-postgres-1 psql -U "$DB_USER" -lqt | cut -d \| -f 1 | grep -qw "$DB_NAME"; then
    echo "Database $DB_NAME exists, applying migration..."

    # Apply migration (the if-guard keeps the failure branch reachable under set -e,
    # which a bare "$? -eq 0" test after the command would not be)
    if $DOCKER exec -i storycove-postgres-1 psql -U "$DB_USER" -d "$DB_NAME" < create_backup_jobs_table.sql; then
      echo "✓ Migration applied successfully to $DB_NAME"
    else
      echo "✗ Failed to apply migration to $DB_NAME"
      exit 1
    fi
  else
    echo "⚠ Database $DB_NAME does not exist, skipping..."
  fi
done

echo ""
echo "Migration complete!"
echo ""
echo "Verifying table creation..."
for DB_NAME in "${DATABASES[@]}"; do
  if $DOCKER exec storycove-postgres-1 psql -U "$DB_USER" -lqt | cut -d \| -f 1 | grep -qw "$DB_NAME"; then
    echo ""
    echo "Checking $DB_NAME:"
    $DOCKER exec storycove-postgres-1 psql -U "$DB_USER" -d "$DB_NAME" -c "\d backup_jobs" 2>/dev/null || echo "  Table not found in $DB_NAME"
  fi
done
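Note: this backend-side script only migrates storycove and storycove_afterdark, while the production script above also covers storycove_clas and storycove_secret. The DB_HOST, DB_PORT, and DB_PASSWORD variables are read but never used, since psql runs inside the container.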
backend/create_backup_jobs_table.sql (new file, 29 lines)
@@ -0,0 +1,29 @@
-- Create backup_jobs table for async backup job tracking
-- This should be run on all library databases (default and afterdark)

CREATE TABLE IF NOT EXISTS backup_jobs (
    id UUID PRIMARY KEY,
    library_id VARCHAR(255) NOT NULL,
    type VARCHAR(50) NOT NULL CHECK (type IN ('DATABASE_ONLY', 'COMPLETE')),
    status VARCHAR(50) NOT NULL CHECK (status IN ('PENDING', 'IN_PROGRESS', 'COMPLETED', 'FAILED', 'EXPIRED')),
    file_path VARCHAR(1000),
    file_size_bytes BIGINT,
    progress_percent INTEGER,
    error_message VARCHAR(1000),
    created_at TIMESTAMP NOT NULL,
    started_at TIMESTAMP,
    completed_at TIMESTAMP,
    expires_at TIMESTAMP
);

-- Create index on library_id for faster lookups
CREATE INDEX IF NOT EXISTS idx_backup_jobs_library_id ON backup_jobs(library_id);

-- Create index on status for cleanup queries
CREATE INDEX IF NOT EXISTS idx_backup_jobs_status ON backup_jobs(status);

-- Create index on expires_at for cleanup queries
CREATE INDEX IF NOT EXISTS idx_backup_jobs_expires_at ON backup_jobs(expires_at);

-- Create index on created_at for ordering
CREATE INDEX IF NOT EXISTS idx_backup_jobs_created_at ON backup_jobs(created_at DESC);
backend/src/main/java/com/storycove/config/DatabaseMigrationRunner.java (new file, 111 lines)
@@ -0,0 +1,111 @@
package com.storycove.config;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.CommandLineRunner;
import org.springframework.core.annotation.Order;
import org.springframework.stereotype.Component;

import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.Statement;
import java.util.Arrays;
import java.util.List;

/**
 * Runs database migrations on application startup.
 * This ensures all library databases have the required schema,
 * particularly for tables like backup_jobs that were added after initial deployment.
 */
@Component
@Order(1) // Run early in startup sequence
public class DatabaseMigrationRunner implements CommandLineRunner {

    private static final Logger logger = LoggerFactory.getLogger(DatabaseMigrationRunner.class);

    @Autowired
    private DataSource dataSource;

    @Value("${spring.datasource.username}")
    private String dbUsername;

    @Value("${spring.datasource.password}")
    private String dbPassword;

    // List of all library databases that need migrations
    private static final List<String> LIBRARY_DATABASES = Arrays.asList(
            "storycove", // default database
            "storycove_afterdark",
            "storycove_clas",
            "storycove_secret"
    );

    // SQL for backup_jobs table migration (idempotent)
    private static final String BACKUP_JOBS_MIGRATION = """
            CREATE TABLE IF NOT EXISTS backup_jobs (
                id UUID PRIMARY KEY,
                library_id VARCHAR(255) NOT NULL,
                type VARCHAR(50) NOT NULL CHECK (type IN ('DATABASE_ONLY', 'COMPLETE')),
                status VARCHAR(50) NOT NULL CHECK (status IN ('PENDING', 'IN_PROGRESS', 'COMPLETED', 'FAILED', 'EXPIRED')),
                file_path VARCHAR(1000),
                file_size_bytes BIGINT,
                progress_percent INTEGER,
                error_message VARCHAR(1000),
                created_at TIMESTAMP NOT NULL,
                started_at TIMESTAMP,
                completed_at TIMESTAMP,
                expires_at TIMESTAMP
            );

            CREATE INDEX IF NOT EXISTS idx_backup_jobs_library_id ON backup_jobs(library_id);
            CREATE INDEX IF NOT EXISTS idx_backup_jobs_status ON backup_jobs(status);
            CREATE INDEX IF NOT EXISTS idx_backup_jobs_expires_at ON backup_jobs(expires_at);
            CREATE INDEX IF NOT EXISTS idx_backup_jobs_created_at ON backup_jobs(created_at DESC);
            """;

    @Override
    public void run(String... args) throws Exception {
        logger.info("🗄️ Starting database migrations...");

        for (String database : LIBRARY_DATABASES) {
            try {
                applyMigrations(database);
                logger.info("✅ Successfully applied migrations to database: {}", database);
            } catch (Exception e) {
                // Log error but don't fail startup if database doesn't exist yet
                if (e.getMessage() != null && e.getMessage().contains("does not exist")) {
                    logger.warn("⚠️ Database {} does not exist yet, skipping migrations", database);
                } else {
                    logger.error("❌ Failed to apply migrations to database: {}", database, e);
                    // Don't throw - allow application to start even if some migrations fail
                }
            }
        }

        logger.info("✅ Database migrations completed");
    }

    private void applyMigrations(String database) throws Exception {
        // We need to connect directly to each database, not through SmartRoutingDataSource.
        // Build the target URL from the default datasource URL; use try-with-resources
        // so the borrowed connection is returned to the pool instead of leaking.
        String originalUrl;
        try (Connection metaConn = dataSource.getConnection()) {
            originalUrl = metaConn.getMetaData().getURL();
        }
        String baseUrl = originalUrl.substring(0, originalUrl.lastIndexOf('/'));
        String targetUrl = baseUrl + "/" + database;

        // Connect directly to the target database using credentials from application properties
        try (Connection conn = java.sql.DriverManager.getConnection(targetUrl, dbUsername, dbPassword)) {
            // Apply backup_jobs migration
            try (Statement stmt = conn.createStatement()) {
                stmt.execute(BACKUP_JOBS_MIGRATION);
            }

            logger.debug("Applied backup_jobs migration to {}", database);
        }
    }
}
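Note: the backup_jobs DDL now exists in three copies: the production shell script, create_backup_jobs_table.sql, and the BACKUP_JOBS_MIGRATION constant above. Any future schema change has to be applied to all three by hand.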
backend/src/main/java/com/storycove/controller/DatabaseController.java (modified)
@@ -1,6 +1,8 @@
 package com.storycove.controller;
 
+import com.storycove.service.AsyncBackupService;
 import com.storycove.service.DatabaseManagementService;
+import com.storycove.service.LibraryService;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.core.io.Resource;
 import org.springframework.http.HttpHeaders;
@@ -12,6 +14,7 @@ import org.springframework.web.multipart.MultipartFile;
 import java.io.IOException;
 import java.time.LocalDateTime;
 import java.time.format.DateTimeFormatter;
+import java.util.List;
 import java.util.Map;
 
 @RestController
@@ -21,6 +24,12 @@ public class DatabaseController {
     @Autowired
     private DatabaseManagementService databaseManagementService;
 
+    @Autowired
+    private AsyncBackupService asyncBackupService;
+
+    @Autowired
+    private LibraryService libraryService;
+
     @PostMapping("/backup")
     public ResponseEntity<Resource> backupDatabase() {
         try {
@@ -83,19 +92,141 @@ public class DatabaseController {
     }
 
     @PostMapping("/backup-complete")
-    public ResponseEntity<Resource> backupComplete() {
+    public ResponseEntity<Map<String, Object>> backupCompleteAsync() {
         try {
-            Resource backup = databaseManagementService.createCompleteBackup();
-            String timestamp = LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd_HH-mm-ss"));
-            String filename = "storycove_complete_backup_" + timestamp + ".zip";
+            String libraryId = libraryService.getCurrentLibraryId();
+            if (libraryId == null) {
+                return ResponseEntity.badRequest()
+                        .body(Map.of("success", false, "message", "No library selected"));
+            }
+
+            // Start backup job asynchronously
+            com.storycove.entity.BackupJob job = asyncBackupService.startBackupJob(
+                    libraryId,
+                    com.storycove.entity.BackupJob.BackupType.COMPLETE
+            );
+
+            return ResponseEntity.ok(Map.of(
+                    "success", true,
+                    "message", "Backup started",
+                    "jobId", job.getId().toString(),
+                    "status", job.getStatus().toString()
+            ));
+        } catch (Exception e) {
+            return ResponseEntity.internalServerError()
+                    .body(Map.of("success", false, "message", "Failed to start backup: " + e.getMessage()));
+        }
+    }
+
+    @GetMapping("/backup-status/{jobId}")
+    public ResponseEntity<Map<String, Object>> getBackupStatus(@PathVariable String jobId) {
+        try {
+            java.util.UUID uuid = java.util.UUID.fromString(jobId);
+            java.util.Optional<com.storycove.entity.BackupJob> jobOpt = asyncBackupService.getJobStatus(uuid);
+
+            if (jobOpt.isEmpty()) {
+                return ResponseEntity.notFound().build();
+            }
+
+            com.storycove.entity.BackupJob job = jobOpt.get();
+
+            return ResponseEntity.ok(Map.of(
+                    "success", true,
+                    "jobId", job.getId().toString(),
+                    "status", job.getStatus().toString(),
+                    "progress", job.getProgressPercent(),
+                    "fileSizeBytes", job.getFileSizeBytes() != null ? job.getFileSizeBytes() : 0,
+                    "createdAt", job.getCreatedAt().toString(),
+                    "completedAt", job.getCompletedAt() != null ? job.getCompletedAt().toString() : "",
+                    "errorMessage", job.getErrorMessage() != null ? job.getErrorMessage() : ""
+            ));
+        } catch (IllegalArgumentException e) {
+            return ResponseEntity.badRequest()
+                    .body(Map.of("success", false, "message", "Invalid job ID"));
+        }
+    }
+
+    @GetMapping("/backup-download/{jobId}")
+    public ResponseEntity<Resource> downloadBackup(@PathVariable String jobId) {
+        try {
+            java.util.UUID uuid = java.util.UUID.fromString(jobId);
+            Resource backup = asyncBackupService.getBackupFile(uuid);
+
+            java.util.Optional<com.storycove.entity.BackupJob> jobOpt = asyncBackupService.getJobStatus(uuid);
+            if (jobOpt.isEmpty()) {
+                return ResponseEntity.notFound().build();
+            }
+
+            com.storycove.entity.BackupJob job = jobOpt.get();
+            String timestamp = job.getCreatedAt().format(DateTimeFormatter.ofPattern("yyyy-MM-dd_HH-mm-ss"));
+            String extension = job.getType() == com.storycove.entity.BackupJob.BackupType.COMPLETE ? "zip" : "sql";
+            String filename = "storycove_backup_" + timestamp + "." + extension;
 
             return ResponseEntity.ok()
                     .header(HttpHeaders.CONTENT_DISPOSITION, "attachment; filename=\"" + filename + "\"")
-                    .header(HttpHeaders.CONTENT_TYPE, "application/zip")
+                    .header(HttpHeaders.CONTENT_TYPE,
+                            job.getType() == com.storycove.entity.BackupJob.BackupType.COMPLETE
+                                    ? "application/zip"
+                                    : "application/sql")
                     .body(backup);
+        } catch (IllegalArgumentException e) {
+            return ResponseEntity.badRequest().build();
         } catch (Exception e) {
-            throw new RuntimeException("Failed to create complete backup: " + e.getMessage(), e);
+            throw new RuntimeException("Failed to download backup: " + e.getMessage(), e);
+        }
+    }
+
+    @GetMapping("/backup-list")
+    public ResponseEntity<Map<String, Object>> listBackups() {
+        try {
+            String libraryId = libraryService.getCurrentLibraryId();
+            if (libraryId == null) {
+                return ResponseEntity.badRequest()
+                        .body(Map.of("success", false, "message", "No library selected"));
+            }
+
+            List<com.storycove.entity.BackupJob> jobs = asyncBackupService.listBackupJobs(libraryId);
+
+            List<Map<String, Object>> jobsList = jobs.stream()
+                    .map(job -> {
+                        Map<String, Object> jobMap = new java.util.HashMap<>();
+                        jobMap.put("jobId", job.getId().toString());
+                        jobMap.put("type", job.getType().toString());
+                        jobMap.put("status", job.getStatus().toString());
+                        jobMap.put("progress", job.getProgressPercent());
+                        jobMap.put("fileSizeBytes", job.getFileSizeBytes() != null ? job.getFileSizeBytes() : 0L);
+                        jobMap.put("createdAt", job.getCreatedAt().toString());
+                        jobMap.put("completedAt", job.getCompletedAt() != null ? job.getCompletedAt().toString() : "");
+                        return jobMap;
+                    })
+                    .collect(java.util.stream.Collectors.toList());
+
+            return ResponseEntity.ok(Map.of(
+                    "success", true,
+                    "backups", jobsList
+            ));
+        } catch (Exception e) {
+            return ResponseEntity.internalServerError()
+                    .body(Map.of("success", false, "message", "Failed to list backups: " + e.getMessage()));
+        }
+    }
+
+    @DeleteMapping("/backup/{jobId}")
+    public ResponseEntity<Map<String, Object>> deleteBackup(@PathVariable String jobId) {
+        try {
+            java.util.UUID uuid = java.util.UUID.fromString(jobId);
+            asyncBackupService.deleteBackupJob(uuid);
+
+            return ResponseEntity.ok(Map.of(
+                    "success", true,
+                    "message", "Backup deleted successfully"
+            ));
+        } catch (IllegalArgumentException e) {
+            return ResponseEntity.badRequest()
+                    .body(Map.of("success", false, "message", "Invalid job ID"));
+        } catch (Exception e) {
+            return ResponseEntity.internalServerError()
+                    .body(Map.of("success", false, "message", "Failed to delete backup: " + e.getMessage()));
+        }
     }
 }
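Note: the synchronous backup download is replaced by a three-step flow: POST /backup-complete creates a job and returns its jobId immediately, GET /backup-status/{jobId} is polled for progress, and GET /backup-download/{jobId} streams the finished archive. The controller's request-mapping prefix is outside this hunk.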
backend/src/main/java/com/storycove/entity/BackupJob.java (new file, 195 lines)
@@ -0,0 +1,195 @@
package com.storycove.entity;

import jakarta.persistence.*;
import java.time.LocalDateTime;
import java.util.UUID;

@Entity
@Table(name = "backup_jobs")
public class BackupJob {

    @Id
    @GeneratedValue(strategy = GenerationType.UUID)
    private UUID id;

    @Column(nullable = false)
    private String libraryId;

    @Column(nullable = false)
    @Enumerated(EnumType.STRING)
    private BackupType type;

    @Column(nullable = false)
    @Enumerated(EnumType.STRING)
    private BackupStatus status;

    @Column
    private String filePath;

    @Column
    private Long fileSizeBytes;

    @Column
    private Integer progressPercent;

    @Column(length = 1000)
    private String errorMessage;

    @Column(nullable = false)
    private LocalDateTime createdAt;

    @Column
    private LocalDateTime startedAt;

    @Column
    private LocalDateTime completedAt;

    @Column
    private LocalDateTime expiresAt;

    @PrePersist
    protected void onCreate() {
        createdAt = LocalDateTime.now();
        // Backups expire after 24 hours
        expiresAt = LocalDateTime.now().plusDays(1);
    }

    // Enums
    public enum BackupType {
        DATABASE_ONLY,
        COMPLETE
    }

    public enum BackupStatus {
        PENDING,
        IN_PROGRESS,
        COMPLETED,
        FAILED,
        EXPIRED
    }

    // Constructors
    public BackupJob() {
    }

    public BackupJob(String libraryId, BackupType type) {
        this.libraryId = libraryId;
        this.type = type;
        this.status = BackupStatus.PENDING;
        this.progressPercent = 0;
    }

    // Getters and setters
    public UUID getId() {
        return id;
    }

    public void setId(UUID id) {
        this.id = id;
    }

    public String getLibraryId() {
        return libraryId;
    }

    public void setLibraryId(String libraryId) {
        this.libraryId = libraryId;
    }

    public BackupType getType() {
        return type;
    }

    public void setType(BackupType type) {
        this.type = type;
    }

    public BackupStatus getStatus() {
        return status;
    }

    public void setStatus(BackupStatus status) {
        this.status = status;
    }

    public String getFilePath() {
        return filePath;
    }

    public void setFilePath(String filePath) {
        this.filePath = filePath;
    }

    public Long getFileSizeBytes() {
        return fileSizeBytes;
    }

    public void setFileSizeBytes(Long fileSizeBytes) {
        this.fileSizeBytes = fileSizeBytes;
    }

    public Integer getProgressPercent() {
        return progressPercent;
    }

    public void setProgressPercent(Integer progressPercent) {
        this.progressPercent = progressPercent;
    }

    public String getErrorMessage() {
        return errorMessage;
    }

    public void setErrorMessage(String errorMessage) {
        this.errorMessage = errorMessage;
    }

    public LocalDateTime getCreatedAt() {
        return createdAt;
    }

    public void setCreatedAt(LocalDateTime createdAt) {
        this.createdAt = createdAt;
    }

    public LocalDateTime getStartedAt() {
        return startedAt;
    }

    public void setStartedAt(LocalDateTime startedAt) {
        this.startedAt = startedAt;
    }

    public LocalDateTime getCompletedAt() {
        return completedAt;
    }

    public void setCompletedAt(LocalDateTime completedAt) {
        this.completedAt = completedAt;
    }

    public LocalDateTime getExpiresAt() {
        return expiresAt;
    }

    public void setExpiresAt(LocalDateTime expiresAt) {
        this.expiresAt = expiresAt;
    }

    // Helper methods
    public boolean isExpired() {
        return LocalDateTime.now().isAfter(expiresAt);
    }

    public boolean isCompleted() {
        return status == BackupStatus.COMPLETED;
    }

    public boolean isFailed() {
        return status == BackupStatus.FAILED;
    }

    public boolean isInProgress() {
        return status == BackupStatus.IN_PROGRESS;
    }
}
backend/src/main/java/com/storycove/repository/BackupJobRepository.java (new file, 25 lines)
@@ -0,0 +1,25 @@
package com.storycove.repository;

import com.storycove.entity.BackupJob;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.data.jpa.repository.Modifying;
import org.springframework.data.jpa.repository.Query;
import org.springframework.data.repository.query.Param;
import org.springframework.stereotype.Repository;

import java.time.LocalDateTime;
import java.util.List;
import java.util.UUID;

@Repository
public interface BackupJobRepository extends JpaRepository<BackupJob, UUID> {

    List<BackupJob> findByLibraryIdOrderByCreatedAtDesc(String libraryId);

    @Query("SELECT bj FROM BackupJob bj WHERE bj.expiresAt < :now AND bj.status = 'COMPLETED'")
    List<BackupJob> findExpiredJobs(@Param("now") LocalDateTime now);

    @Modifying
    @Query("UPDATE BackupJob bj SET bj.status = 'EXPIRED' WHERE bj.expiresAt < :now AND bj.status = 'COMPLETED'")
    int markExpiredJobs(@Param("now") LocalDateTime now);
}
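Spring Data only executes @Modifying JPQL inside a transaction, so callers of markExpiredJobs need @Transactional. A minimal caller sketch; the class and method names here are illustrative, and AsyncBackupService below is the real consumer:

import com.storycove.repository.BackupJobRepository;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

import java.time.LocalDateTime;

@Service
public class BackupCleanupExample {

    @Autowired
    private BackupJobRepository backupJobRepository;

    @Transactional // required: markExpiredJobs is a @Modifying JPQL update
    public void expireOldJobs() {
        int marked = backupJobRepository.markExpiredJobs(LocalDateTime.now());
        // 'marked' is the number of COMPLETED jobs flipped to EXPIRED
    }
}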
backend/src/main/java/com/storycove/repository/StoryRepository.java (modified)
@@ -87,6 +87,9 @@ public interface StoryRepository extends JpaRepository<Story, UUID> {
     @Query("SELECT COUNT(s) FROM Story s WHERE s.createdAt >= :since")
     long countStoriesCreatedSince(@Param("since") LocalDateTime since);
 
+    @Query("SELECT COUNT(s) FROM Story s WHERE s.createdAt >= :since OR s.updatedAt >= :since")
+    long countStoriesModifiedAfter(@Param("since") LocalDateTime since);
+
     @Query("SELECT AVG(s.wordCount) FROM Story s")
     Double findAverageWordCount();
backend/src/main/java/com/storycove/service/AsyncBackupExecutor.java (new file, 125 lines)
@@ -0,0 +1,125 @@
package com.storycove.service;

import com.storycove.entity.BackupJob;
import com.storycove.repository.BackupJobRepository;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.core.io.Resource;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Propagation;
import org.springframework.transaction.annotation.Transactional;

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.LocalDateTime;
import java.util.Optional;
import java.util.UUID;

/**
 * Separate service for async backup execution.
 * This is needed because @Async doesn't work when called from within the same class.
 */
@Service
public class AsyncBackupExecutor {

    private static final Logger logger = LoggerFactory.getLogger(AsyncBackupExecutor.class);

    @Value("${storycove.upload.dir:/app/images}")
    private String uploadDir;

    @Autowired
    private BackupJobRepository backupJobRepository;

    @Autowired
    private DatabaseManagementService databaseManagementService;

    @Autowired
    private LibraryService libraryService;

    /**
     * Execute backup asynchronously.
     * This method MUST be in a separate service class for @Async to work properly.
     */
    @Async
    @Transactional(propagation = Propagation.REQUIRES_NEW)
    public void executeBackupAsync(UUID jobId) {
        logger.info("Async executor starting for job {}", jobId);

        Optional<BackupJob> jobOpt = backupJobRepository.findById(jobId);
        if (jobOpt.isEmpty()) {
            logger.error("Backup job not found: {}", jobId);
            return;
        }

        BackupJob job = jobOpt.get();
        job.setStatus(BackupJob.BackupStatus.IN_PROGRESS);
        job.setStartedAt(LocalDateTime.now());
        job.setProgressPercent(0);
        backupJobRepository.save(job);

        try {
            logger.info("Starting backup job {} for library {}", job.getId(), job.getLibraryId());

            // Switch to the correct library
            if (!job.getLibraryId().equals(libraryService.getCurrentLibraryId())) {
                libraryService.switchToLibraryAfterAuthentication(job.getLibraryId());
            }

            // Create backup file
            Path backupDir = Paths.get(uploadDir, "backups", job.getLibraryId());
            Files.createDirectories(backupDir);

            String filename = String.format("backup_%s_%s.%s",
                    job.getId().toString(),
                    LocalDateTime.now().toString().replaceAll(":", "-"),
                    job.getType() == BackupJob.BackupType.COMPLETE ? "zip" : "sql");

            Path backupFile = backupDir.resolve(filename);

            job.setProgressPercent(10);
            backupJobRepository.save(job);

            // Create the backup
            Resource backupResource;
            if (job.getType() == BackupJob.BackupType.COMPLETE) {
                backupResource = databaseManagementService.createCompleteBackup();
            } else {
                backupResource = databaseManagementService.createBackup();
            }

            job.setProgressPercent(80);
            backupJobRepository.save(job);

            // Copy resource to permanent file
            try (var inputStream = backupResource.getInputStream();
                 var outputStream = Files.newOutputStream(backupFile)) {
                inputStream.transferTo(outputStream);
            }

            job.setProgressPercent(95);
            backupJobRepository.save(job);

            // Set file info
            job.setFilePath(backupFile.toString());
            job.setFileSizeBytes(Files.size(backupFile));
            job.setStatus(BackupJob.BackupStatus.COMPLETED);
            job.setCompletedAt(LocalDateTime.now());
            job.setProgressPercent(100);

            logger.info("Backup job {} completed successfully. File size: {} bytes",
                    job.getId(), job.getFileSizeBytes());

        } catch (Exception e) {
            logger.error("Backup job {} failed", job.getId(), e);
            job.setStatus(BackupJob.BackupStatus.FAILED);
            job.setErrorMessage(e.getMessage());
            job.setCompletedAt(LocalDateTime.now());
        } finally {
            backupJobRepository.save(job);
        }
    }
}
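Both @Async here and the @Scheduled jobs below only take effect when async and scheduling support are enabled, which this diff does not show. A minimal configuration sketch, assuming such a class exists elsewhere in the application (the class name AsyncConfig is not from the commit):

package com.storycove.config;

import org.springframework.context.annotation.Configuration;
import org.springframework.scheduling.annotation.EnableAsync;
import org.springframework.scheduling.annotation.EnableScheduling;

// Assumed configuration: without @EnableAsync, executeBackupAsync would run
// synchronously on the caller's thread; without @EnableScheduling, the
// @Scheduled cleanup and automatic-backup jobs would never fire.
@Configuration
@EnableAsync
@EnableScheduling
public class AsyncConfig {
}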
backend/src/main/java/com/storycove/service/AsyncBackupService.java (new file, 167 lines)
@@ -0,0 +1,167 @@
package com.storycove.service;

import com.storycove.entity.BackupJob;
import com.storycove.repository.BackupJobRepository;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.core.io.FileSystemResource;
import org.springframework.core.io.Resource;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.LocalDateTime;
import java.util.List;
import java.util.Optional;
import java.util.UUID;

@Service
public class AsyncBackupService {

    private static final Logger logger = LoggerFactory.getLogger(AsyncBackupService.class);

    @Value("${storycove.upload.dir:/app/images}")
    private String uploadDir;

    @Autowired
    private BackupJobRepository backupJobRepository;

    @Autowired
    private AsyncBackupExecutor asyncBackupExecutor;

    /**
     * Start a backup job asynchronously.
     * This method returns immediately after creating the job record.
     */
    @Transactional
    public BackupJob startBackupJob(String libraryId, BackupJob.BackupType type) {
        logger.info("Creating backup job for library: {}, type: {}", libraryId, type);

        BackupJob job = new BackupJob(libraryId, type);
        job = backupJobRepository.save(job);

        logger.info("Backup job created with ID: {}. Starting async execution...", job.getId());

        // Start backup in background using separate service (ensures @Async works properly)
        asyncBackupExecutor.executeBackupAsync(job.getId());

        logger.info("Async backup execution triggered for job: {}", job.getId());

        return job;
    }

    /**
     * Get backup job status.
     */
    public Optional<BackupJob> getJobStatus(UUID jobId) {
        return backupJobRepository.findById(jobId);
    }

    /**
     * Get backup file for download.
     */
    public Resource getBackupFile(UUID jobId) throws IOException {
        Optional<BackupJob> jobOpt = backupJobRepository.findById(jobId);
        if (jobOpt.isEmpty()) {
            throw new IOException("Backup job not found");
        }

        BackupJob job = jobOpt.get();

        if (!job.isCompleted()) {
            throw new IOException("Backup is not completed yet");
        }

        if (job.isExpired()) {
            throw new IOException("Backup has expired");
        }

        if (job.getFilePath() == null) {
            throw new IOException("Backup file path not set");
        }

        Path backupPath = Paths.get(job.getFilePath());
        if (!Files.exists(backupPath)) {
            throw new IOException("Backup file not found");
        }

        return new FileSystemResource(backupPath);
    }

    /**
     * List backup jobs for a library.
     */
    public List<BackupJob> listBackupJobs(String libraryId) {
        return backupJobRepository.findByLibraryIdOrderByCreatedAtDesc(libraryId);
    }

    /**
     * Clean up expired backup jobs and their files.
     * Runs daily at 2 AM.
     */
    @Scheduled(cron = "0 0 2 * * ?")
    @Transactional
    public void cleanupExpiredBackups() {
        logger.info("Starting cleanup of expired backups");

        LocalDateTime now = LocalDateTime.now();

        // Find all expired jobs first: markExpiredJobs flips their status away
        // from COMPLETED, so looking them up afterwards would return nothing
        List<BackupJob> expiredJobs = backupJobRepository.findExpiredJobs(now);

        // Mark expired jobs
        int markedCount = backupJobRepository.markExpiredJobs(now);
        logger.info("Marked {} jobs as expired", markedCount);

        for (BackupJob job : expiredJobs) {
            if (job.getFilePath() != null) {
                try {
                    Path filePath = Paths.get(job.getFilePath());
                    if (Files.exists(filePath)) {
                        Files.delete(filePath);
                        logger.info("Deleted expired backup file: {}", filePath);
                    }
                } catch (IOException e) {
                    logger.warn("Failed to delete expired backup file: {}", job.getFilePath(), e);
                }
            }

            // Delete the job record
            backupJobRepository.delete(job);
        }

        logger.info("Cleanup completed. Deleted {} expired backups", expiredJobs.size());
    }

    /**
     * Delete a specific backup job and its file.
     */
    @Transactional
    public void deleteBackupJob(UUID jobId) throws IOException {
        Optional<BackupJob> jobOpt = backupJobRepository.findById(jobId);
        if (jobOpt.isEmpty()) {
            throw new IOException("Backup job not found");
        }

        BackupJob job = jobOpt.get();

        // Delete file if it exists
        if (job.getFilePath() != null) {
            Path filePath = Paths.get(job.getFilePath());
            if (Files.exists(filePath)) {
                Files.delete(filePath);
                logger.info("Deleted backup file: {}", filePath);
            }
        }

        // Delete job record
        backupJobRepository.delete(job);
        logger.info("Deleted backup job: {}", jobId);
    }
}
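Note: startBackupJob is itself @Transactional, so the executor's REQUIRES_NEW transaction can run its findById before the creating transaction commits; if that happens, the executor logs "Backup job not found" and returns, leaving the job stuck in PENDING. In practice the save usually commits first, but the window exists.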
backend/src/main/java/com/storycove/service/AutomaticBackupService.java (new file, 262 lines)
@@ -0,0 +1,262 @@
package com.storycove.service;

import com.storycove.repository.StoryRepository;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.core.io.Resource;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Service;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.FileTime;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

/**
 * Service for automatic daily backups.
 * Runs at 4 AM daily and creates a backup if content has changed since the last backup.
 * Keeps a maximum of 5 backups, rotating old ones out.
 */
@Service
public class AutomaticBackupService {

    private static final Logger logger = LoggerFactory.getLogger(AutomaticBackupService.class);
    private static final int MAX_BACKUPS = 5;
    private static final DateTimeFormatter FILENAME_FORMATTER = DateTimeFormatter.ofPattern("yyyy-MM-dd_HH-mm-ss");

    @Value("${storycove.automatic-backup.dir:/app/automatic-backups}")
    private String automaticBackupDir;

    @Autowired
    private StoryRepository storyRepository;

    @Autowired
    private DatabaseManagementService databaseManagementService;

    @Autowired
    private LibraryService libraryService;

    private LocalDateTime lastBackupCheck = null;

    /**
     * Scheduled job that runs daily at 4 AM.
     * Creates a backup if content has changed since the last backup.
     */
    @Scheduled(cron = "0 0 4 * * ?")
    public void performAutomaticBackup() {
        logger.info("========================================");
        logger.info("Starting automatic backup check at 4 AM");
        logger.info("========================================");

        try {
            // Get current library ID (or default)
            String libraryId = libraryService.getCurrentLibraryId();
            if (libraryId == null) {
                libraryId = "default";
            }

            logger.info("Checking for content changes in library: {}", libraryId);

            // Check if content has changed since last backup
            if (!hasContentChanged()) {
                logger.info("No content changes detected since last backup. Skipping backup.");
                logger.info("========================================");
                return;
            }

            logger.info("Content changes detected! Creating automatic backup...");

            // Create backup directory for this library
            Path backupPath = Paths.get(automaticBackupDir, libraryId);
            Files.createDirectories(backupPath);

            // Create the backup
            String timestamp = LocalDateTime.now().format(FILENAME_FORMATTER);
            String filename = String.format("auto_backup_%s.zip", timestamp);
            Path backupFile = backupPath.resolve(filename);

            logger.info("Creating complete backup to: {}", backupFile);

            Resource backup = databaseManagementService.createCompleteBackup();

            // Write backup to file
            try (var inputStream = backup.getInputStream();
                 var outputStream = Files.newOutputStream(backupFile)) {
                inputStream.transferTo(outputStream);
            }

            long fileSize = Files.size(backupFile);
            logger.info("✅ Automatic backup created successfully");
            logger.info("   File: {}", backupFile.getFileName());
            logger.info("   Size: {} MB", fileSize / 1024 / 1024);

            // Rotate old backups (keep only MAX_BACKUPS)
            rotateBackups(backupPath);

            // Update last backup check time
            lastBackupCheck = LocalDateTime.now();

            logger.info("========================================");
            logger.info("Automatic backup completed successfully");
            logger.info("========================================");

        } catch (Exception e) {
            logger.error("❌ Automatic backup failed", e);
            logger.info("========================================");
        }
    }

    /**
     * Check if content has changed since the last backup.
     * Looks for stories created or updated after the last backup time.
     */
    private boolean hasContentChanged() {
        try {
            if (lastBackupCheck == null) {
                // First run - check if there are any stories at all
                long storyCount = storyRepository.count();
                logger.info("First backup check - found {} stories", storyCount);
                return storyCount > 0;
            }

            // Check for stories created or updated since last backup
            long changedCount = storyRepository.countStoriesModifiedAfter(lastBackupCheck);
            logger.info("Found {} stories modified since last backup ({})", changedCount, lastBackupCheck);
            return changedCount > 0;

        } catch (Exception e) {
            logger.error("Error checking for content changes", e);
            // On error, create backup to be safe
            return true;
        }
    }

    /**
     * Rotate backups - keep only the MAX_BACKUPS most recent backups.
     * Deletes older backups.
     */
    private void rotateBackups(Path backupPath) throws IOException {
        logger.info("Checking for old backups to rotate...");

        // Find all backup files in the directory
        List<Path> backupFiles;
        try (Stream<Path> stream = Files.list(backupPath)) {
            backupFiles = stream
                    .filter(Files::isRegularFile)
                    .filter(p -> p.getFileName().toString().startsWith("auto_backup_"))
                    .filter(p -> p.getFileName().toString().endsWith(".zip"))
                    .sorted(Comparator.comparing((Path p) -> {
                        try {
                            return Files.getLastModifiedTime(p);
                        } catch (IOException e) {
                            // Returning null here would make Comparator.comparing throw;
                            // fall back to the epoch so unreadable files sort last
                            return FileTime.fromMillis(0L);
                        }
                    }).reversed()) // Most recent first
                    .collect(Collectors.toList());
        }

        logger.info("Found {} automatic backups", backupFiles.size());

        // Delete old backups if we exceed MAX_BACKUPS
        if (backupFiles.size() > MAX_BACKUPS) {
            List<Path> toDelete = backupFiles.subList(MAX_BACKUPS, backupFiles.size());
            logger.info("Deleting {} old backups to maintain maximum of {}", toDelete.size(), MAX_BACKUPS);

            for (Path oldBackup : toDelete) {
                try {
                    Files.delete(oldBackup);
                    logger.info("  Deleted old backup: {}", oldBackup.getFileName());
                } catch (IOException e) {
                    logger.warn("Failed to delete old backup: {}", oldBackup, e);
                }
            }
        } else {
            logger.info("Backup count within limit ({}), no rotation needed", MAX_BACKUPS);
        }
    }

    /**
     * Manual trigger for testing - creates a backup immediately if content changed.
     */
    public void triggerManualBackup() {
        logger.info("Manual automatic backup triggered");
        performAutomaticBackup();
    }

    /**
     * Get the list of automatic backups for the current library.
     */
    public List<BackupInfo> listAutomaticBackups() throws IOException {
        String libraryId = libraryService.getCurrentLibraryId();
        if (libraryId == null) {
            libraryId = "default";
        }

        Path backupPath = Paths.get(automaticBackupDir, libraryId);
        if (!Files.exists(backupPath)) {
            return List.of();
        }

        try (Stream<Path> stream = Files.list(backupPath)) {
            return stream
                    .filter(Files::isRegularFile)
                    .filter(p -> p.getFileName().toString().startsWith("auto_backup_"))
                    .filter(p -> p.getFileName().toString().endsWith(".zip"))
                    .sorted(Comparator.comparing((Path p) -> {
                        try {
                            return Files.getLastModifiedTime(p);
                        } catch (IOException e) {
                            // Same epoch fallback as in rotateBackups to keep the comparator null-safe
                            return FileTime.fromMillis(0L);
                        }
                    }).reversed())
                    .map(p -> {
                        try {
                            return new BackupInfo(
                                    p.getFileName().toString(),
                                    Files.size(p),
                                    Files.getLastModifiedTime(p).toInstant().toString()
                            );
                        } catch (IOException e) {
                            return null;
                        }
                    })
                    .filter(info -> info != null)
                    .collect(Collectors.toList());
        }
    }

    /**
     * Simple backup info class.
     */
    public static class BackupInfo {
        private final String filename;
        private final long sizeBytes;
        private final String createdAt;

        public BackupInfo(String filename, long sizeBytes, String createdAt) {
            this.filename = filename;
            this.sizeBytes = sizeBytes;
            this.createdAt = createdAt;
        }

        public String getFilename() {
            return filename;
        }

        public long getSizeBytes() {
            return sizeBytes;
        }

        public String getCreatedAt() {
            return createdAt;
        }
    }
}
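Note: lastBackupCheck lives only in memory, so after every restart hasContentChanged() falls back to the first-run branch and backs up whenever any stories exist, regardless of actual changes.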
@@ -7,7 +7,6 @@ import org.springframework.beans.factory.annotation.Qualifier;
 import org.springframework.beans.factory.annotation.Value;
 import org.springframework.context.ApplicationContext;
 import org.springframework.context.ApplicationContextAware;
-import org.springframework.core.io.ByteArrayResource;
 import org.springframework.core.io.Resource;
 import org.springframework.stereotype.Service;
 import org.springframework.transaction.annotation.Transactional;
@@ -141,9 +140,12 @@ public class DatabaseManagementService implements ApplicationContextAware {
 
     /**
      * Create a comprehensive backup including database and files in ZIP format
+     * Returns a streaming resource to avoid loading large backups into memory
      */
     public Resource createCompleteBackup() throws SQLException, IOException {
+        // Create temp file with deleteOnExit as safety net
         Path tempZip = Files.createTempFile("storycove-backup", ".zip");
+        tempZip.toFile().deleteOnExit();
+
         try (ZipOutputStream zipOut = new ZipOutputStream(Files.newOutputStream(tempZip))) {
             // 1. Add database dump
@@ -156,11 +158,30 @@ public class DatabaseManagementService implements ApplicationContextAware {
             addMetadataToZip(zipOut);
         }
 
-        // Return the ZIP file as a resource
-        byte[] zipData = Files.readAllBytes(tempZip);
-        Files.deleteIfExists(tempZip);
-        return new ByteArrayResource(zipData);
+        // Return the ZIP file as a FileSystemResource for streaming.
+        // This avoids loading the entire file into memory.
+        return new org.springframework.core.io.FileSystemResource(tempZip.toFile()) {
+            @Override
+            public InputStream getInputStream() throws IOException {
+                // Wrap the input stream to delete the temp file after it's fully read
+                return new java.io.FilterInputStream(super.getInputStream()) {
+                    @Override
+                    public void close() throws IOException {
+                        try {
+                            super.close();
+                        } finally {
+                            // Clean up temp file after streaming is complete
+                            try {
+                                Files.deleteIfExists(tempZip);
+                            } catch (IOException e) {
+                                // Log but don't fail - deleteOnExit will handle it
+                                System.err.println("Warning: Could not delete temp backup file: " + e.getMessage());
+                            }
+                        }
+                    }
+                };
+            }
+        };
     }
 
     /**
@@ -289,20 +310,34 @@ public class DatabaseManagementService implements ApplicationContextAware {
 
         System.err.println("PostgreSQL backup completed successfully");
 
-        // Read the backup file into memory
-        byte[] backupData = Files.readAllBytes(tempBackupFile);
-        return new ByteArrayResource(backupData);
+        // Return the backup file as a streaming resource to avoid memory issues with large databases
+        tempBackupFile.toFile().deleteOnExit();
+        return new org.springframework.core.io.FileSystemResource(tempBackupFile.toFile()) {
+            @Override
+            public InputStream getInputStream() throws IOException {
+                // Wrap the input stream to delete the temp file after it's fully read
+                return new java.io.FilterInputStream(super.getInputStream()) {
+                    @Override
+                    public void close() throws IOException {
+                        try {
+                            super.close();
+                        } finally {
+                            // Clean up temp file after streaming is complete
+                            try {
+                                Files.deleteIfExists(tempBackupFile);
+                            } catch (IOException e) {
+                                // Log but don't fail - deleteOnExit will handle it
+                                System.err.println("Warning: Could not delete temp backup file: " + e.getMessage());
+                            }
+                        }
+                    }
+                };
+            }
+        };
 
     } catch (InterruptedException e) {
         Thread.currentThread().interrupt();
         throw new RuntimeException("Backup process was interrupted", e);
-    } finally {
-        // Clean up temporary file
-        try {
-            Files.deleteIfExists(tempBackupFile);
-        } catch (IOException e) {
-            System.err.println("Warning: Could not delete temporary backup file: " + e.getMessage());
-        }
     }
 }
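Both streaming resources delete their temp file only when the returned input stream is closed, so callers must actually drain and close it. A minimal consumer sketch mirroring what AsyncBackupExecutor does (targetPath is illustrative):

import org.springframework.core.io.Resource;

import java.io.InputStream;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;

class BackupConsumerExample {
    static void copyBackup(Resource backup, Path targetPath) throws Exception {
        // try-with-resources closes the wrapped stream, which is what
        // triggers the deferred Files.deleteIfExists on the temp file
        try (InputStream in = backup.getInputStream();
             OutputStream out = Files.newOutputStream(targetPath)) {
            in.transferTo(out);
        }
    }
}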
backend/src/main/resources/application.yml (modified)
@@ -89,6 +89,8 @@ storycove:
|
|||||||
enable-metrics: ${SOLR_ENABLE_METRICS:true}
|
enable-metrics: ${SOLR_ENABLE_METRICS:true}
|
||||||
images:
|
images:
|
||||||
storage-path: ${IMAGE_STORAGE_PATH:/app/images}
|
storage-path: ${IMAGE_STORAGE_PATH:/app/images}
|
||||||
|
automatic-backup:
|
||||||
|
dir: ${AUTOMATIC_BACKUP_DIR:/app/automatic-backups}
|
||||||
|
|
||||||
management:
|
management:
|
||||||
endpoints:
|
endpoints:
|
||||||
|
|||||||
@@ -55,6 +55,11 @@ if [ $RETRY_COUNT -eq $MAX_RETRIES ]; then
     exit 1
 fi
 
+# Apply database migrations
+echo -e "${YELLOW}🗄️ Applying database migrations...${NC}"
+docker-compose run --rm migrations
+echo -e "${GREEN}✅ Database migrations applied${NC}"
+
 # Check if Solr is ready
 echo -e "${YELLOW}🔍 Checking Solr health...${NC}"
 RETRY_COUNT=0
docker-compose.yml (modified)
@@ -44,9 +44,10 @@ services:
     volumes:
       - images_data:/app/images
       - library_config:/app/config
+      - automatic_backups:/app/automatic-backups
     depends_on:
       postgres:
-        condition: service_started
+        condition: service_healthy
       solr:
         condition: service_started
     networks:
@@ -65,6 +66,11 @@ services:
       - postgres_data:/var/lib/postgresql/data
     networks:
       - storycove-network
+    healthcheck:
+      test: ["CMD-SHELL", "pg_isready -U storycove -d storycove"]
+      interval: 5s
+      timeout: 5s
+      retries: 5
 
   solr:
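Note: switching the backend's postgres dependency to service_healthy makes the application wait until pg_isready succeeds before starting, so DatabaseMigrationRunner no longer races a database that is still booting.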
@@ -101,6 +107,7 @@ volumes:
   solr_data:
   images_data:
   library_config:
+  automatic_backups:
 
 configs:
   nginx_config:
SystemSettings.tsx (modified, frontend)
@@ -33,11 +33,18 @@ export default function SystemSettings({}: SystemSettingsProps) {
   });
 
   const [databaseStatus, setDatabaseStatus] = useState<{
-    completeBackup: { loading: boolean; message: string; success?: boolean };
+    completeBackup: {
+      loading: boolean;
+      message: string;
+      success?: boolean;
+      jobId?: string;
+      progress?: number;
+      downloadReady?: boolean;
+    };
     completeRestore: { loading: boolean; message: string; success?: boolean };
     completeClear: { loading: boolean; message: string; success?: boolean };
   }>({
-    completeBackup: { loading: false, message: '' },
+    completeBackup: { loading: false, message: '', progress: 0 },
     completeRestore: { loading: false, message: '' },
     completeClear: { loading: false, message: '' }
   });
@@ -73,43 +80,117 @@ export default function SystemSettings({}: SystemSettingsProps) {
   const handleCompleteBackup = async () => {
     setDatabaseStatus(prev => ({
       ...prev,
-      completeBackup: { loading: true, message: 'Creating complete backup...', success: undefined }
+      completeBackup: { loading: true, message: 'Starting backup...', success: undefined, progress: 0, downloadReady: false }
     }));

     try {
-      const backupBlob = await databaseApi.backupComplete();
-
-      // Create download link
-      const url = window.URL.createObjectURL(backupBlob);
-      const link = document.createElement('a');
-      link.href = url;
-
-      const timestamp = new Date().toISOString().replace(/[:.]/g, '-').slice(0, 19);
-      link.download = `storycove_complete_backup_${timestamp}.zip`;
-
-      document.body.appendChild(link);
-      link.click();
-      document.body.removeChild(link);
-      window.URL.revokeObjectURL(url);
+      // Start the async backup job
+      const startResponse = await databaseApi.backupComplete();
+      const jobId = startResponse.jobId;

       setDatabaseStatus(prev => ({
         ...prev,
-        completeBackup: { loading: false, message: 'Complete backup downloaded successfully', success: true }
+        completeBackup: { ...prev.completeBackup, jobId, message: 'Backup in progress...' }
       }));
-    } catch (error: any) {
-      setDatabaseStatus(prev => ({
-        ...prev,
-        completeBackup: { loading: false, message: error.message || 'Complete backup failed', success: false }
-      }));
-    }
-
-    // Clear message after 5 seconds
-    setTimeout(() => {
-      setDatabaseStatus(prev => ({
-        ...prev,
-        completeBackup: { loading: false, message: '', success: undefined }
-      }));
-    }, 5000);
+
+      // Poll for progress
+      const pollInterval = setInterval(async () => {
+        try {
+          const status = await databaseApi.getBackupStatus(jobId);
+
+          if (status.status === 'COMPLETED') {
+            clearInterval(pollInterval);
+            setDatabaseStatus(prev => ({
+              ...prev,
+              completeBackup: {
+                loading: false,
+                message: 'Backup completed! Ready to download.',
+                success: true,
+                jobId,
+                progress: 100,
+                downloadReady: true
+              }
+            }));
+
+            // Clear message after 30 seconds (keep download button visible)
+            setTimeout(() => {
+              setDatabaseStatus(prev => ({
+                ...prev,
+                completeBackup: { ...prev.completeBackup, message: '' }
+              }));
+            }, 30000);
+          } else if (status.status === 'FAILED') {
+            clearInterval(pollInterval);
+            setDatabaseStatus(prev => ({
+              ...prev,
+              completeBackup: {
+                loading: false,
+                message: `Backup failed: ${status.errorMessage}`,
+                success: false,
+                progress: 0,
+                downloadReady: false
+              }
+            }));
+          } else {
+            // Update progress
+            setDatabaseStatus(prev => ({
+              ...prev,
+              completeBackup: {
+                ...prev.completeBackup,
+                progress: status.progress,
+                message: `Creating backup... ${status.progress}%`
+              }
+            }));
+          }
+        } catch (pollError: any) {
+          clearInterval(pollInterval);
+          setDatabaseStatus(prev => ({
+            ...prev,
+            completeBackup: {
+              loading: false,
+              message: `Failed to check backup status: ${pollError.message}`,
+              success: false,
+              progress: 0,
+              downloadReady: false
+            }
+          }));
+        }
+      }, 2000); // Poll every 2 seconds
+
+    } catch (error: any) {
+      setDatabaseStatus(prev => ({
+        ...prev,
+        completeBackup: {
+          loading: false,
+          message: error.message || 'Failed to start backup',
+          success: false,
+          progress: 0,
+          downloadReady: false
+        }
+      }));
+    }
+  };
+
+  const handleDownloadBackup = (jobId: string) => {
+    const downloadUrl = databaseApi.downloadBackup(jobId);
+    const link = document.createElement('a');
+    link.href = downloadUrl;
+    link.download = ''; // Filename will be set by server
+    document.body.appendChild(link);
+    link.click();
+    document.body.removeChild(link);
+
+    // Clear the download ready state after download
+    setDatabaseStatus(prev => ({
+      ...prev,
+      completeBackup: {
+        loading: false,
+        message: 'Backup downloaded successfully',
+        success: true,
+        progress: 100,
+        downloadReady: false
+      }
+    }));
+  };

   const handleCompleteRestore = async (event: React.ChangeEvent<HTMLInputElement>) => {
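A caveat worth noting on the handler above: the 2-second setInterval is cleared only when the job reaches a terminal state or a poll throws, so unmounting the component mid-backup leaves the timer running. A hypothetical refactoring (pollJob is not part of this diff) that returns a stop function suitable for a useEffect cleanup:

    // Polls getStatus until the job completes or fails; the returned function
    // cancels polling early (e.g. from a React useEffect cleanup).
    function pollJob(
      getStatus: () => Promise<{ status: string }>,
      onTick: (status: { status: string }) => void,
      intervalMs = 2000
    ): () => void {
      const id = setInterval(async () => {
        try {
          const status = await getStatus();
          onTick(status);
          if (status.status === 'COMPLETED' || status.status === 'FAILED') {
            clearInterval(id);
          }
        } catch {
          clearInterval(id);
        }
      }, intervalMs);
      return () => clearInterval(id);
    }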
@@ -792,20 +873,50 @@ export default function SystemSettings({}: SystemSettingsProps) {
                 <p className="text-sm theme-text mb-3">
                   Download a complete backup as a ZIP file. This includes your database AND all uploaded files (cover images, avatars). This is a comprehensive backup of your entire StoryCove installation.
                 </p>
+                <div className="space-y-3">
                 <Button
                   onClick={handleCompleteBackup}
-                  disabled={databaseStatus.completeBackup.loading}
+                  disabled={databaseStatus.completeBackup.loading || databaseStatus.completeBackup.downloadReady}
                   loading={databaseStatus.completeBackup.loading}
                   variant="primary"
                   className="w-full sm:w-auto"
                 >
-                  {databaseStatus.completeBackup.loading ? 'Creating Backup...' : 'Download Backup'}
+                  {databaseStatus.completeBackup.loading ? 'Creating Backup...' : 'Create Backup'}
                 </Button>
+
+                {databaseStatus.completeBackup.downloadReady && databaseStatus.completeBackup.jobId && (
+                  <Button
+                    onClick={() => handleDownloadBackup(databaseStatus.completeBackup.jobId!)}
+                    variant="primary"
+                    className="w-full sm:w-auto ml-0 sm:ml-3 bg-green-600 hover:bg-green-700"
+                  >
+                    ⬇️ Download Backup
+                  </Button>
+                )}
+                </div>
+
+                {databaseStatus.completeBackup.loading && databaseStatus.completeBackup.progress !== undefined && (
+                  <div className="mt-3">
+                    <div className="flex justify-between text-sm theme-text mb-1">
+                      <span>Progress</span>
+                      <span>{databaseStatus.completeBackup.progress}%</span>
+                    </div>
+                    <div className="w-full bg-gray-200 dark:bg-gray-700 rounded-full h-2.5">
+                      <div
+                        className="bg-blue-600 dark:bg-blue-500 h-2.5 rounded-full transition-all duration-300"
+                        style={{ width: `${databaseStatus.completeBackup.progress}%` }}
+                      ></div>
+                    </div>
+                  </div>
+                )}

                 {databaseStatus.completeBackup.message && (
                   <div className={`text-sm p-2 rounded mt-3 ${
                     databaseStatus.completeBackup.success
                       ? 'bg-green-50 dark:bg-green-900/20 text-green-800 dark:text-green-200'
-                      : 'bg-red-50 dark:bg-red-900/20 text-red-800 dark:text-red-200'
+                      : databaseStatus.completeBackup.success === false
+                        ? 'bg-red-50 dark:bg-red-900/20 text-red-800 dark:text-red-200'
+                        : 'bg-blue-50 dark:bg-blue-900/20 text-blue-800 dark:text-blue-200'
                   }`}>
                     {databaseStatus.completeBackup.message}
                   </div>
@@ -114,9 +114,10 @@ const htmlToSlate = (html: string): Descendant[] => {
         const img = element as HTMLImageElement;
         results.push({
           type: 'image',
-          src: img.src || img.getAttribute('src') || '',
-          alt: img.alt || img.getAttribute('alt') || '',
-          caption: img.title || img.getAttribute('title') || '',
+          // Use getAttribute to preserve relative URLs instead of .src which converts to absolute
+          src: img.getAttribute('src') || '',
+          alt: img.getAttribute('alt') || '',
+          caption: img.getAttribute('title') || '',
           children: [{ text: '' }] // Images need children in Slate
         });
         break;
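The new comment captures a real DOM quirk: the src property of an HTMLImageElement reflects the fully resolved URL, while getAttribute('src') returns the attribute value exactly as authored. A quick standalone illustration:

    // Parsing markup with a relative image path
    const div = document.createElement('div');
    div.innerHTML = '<img src="/images/cover.jpg">';
    const img = div.querySelector('img')!;

    img.getAttribute('src'); // "/images/cover.jpg" (relative, as authored)
    img.src;                 // e.g. "https://example.com/images/cover.jpg" (resolved to absolute)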
@@ -1013,10 +1013,47 @@ export const databaseApi = {
     return response.data;
   },

-  backupComplete: async (): Promise<Blob> => {
-    const response = await api.post('/database/backup-complete', {}, {
-      responseType: 'blob'
-    });
+  backupComplete: async (): Promise<{ success: boolean; jobId: string; status: string; message: string }> => {
+    const response = await api.post('/database/backup-complete');
+    return response.data;
+  },
+
+  getBackupStatus: async (jobId: string): Promise<{
+    success: boolean;
+    jobId: string;
+    status: string;
+    progress: number;
+    fileSizeBytes: number;
+    createdAt: string;
+    completedAt: string;
+    errorMessage: string;
+  }> => {
+    const response = await api.get(`/database/backup-status/${jobId}`);
+    return response.data;
+  },
+
+  downloadBackup: (jobId: string): string => {
+    return `/api/database/backup-download/${jobId}`;
+  },
+
+  listBackups: async (): Promise<{
+    success: boolean;
+    backups: Array<{
+      jobId: string;
+      type: string;
+      status: string;
+      progress: number;
+      fileSizeBytes: number;
+      createdAt: string;
+      completedAt: string;
+    }>;
+  }> => {
+    const response = await api.get('/database/backup-list');
+    return response.data;
+  },
+
+  deleteBackup: async (jobId: string): Promise<{ success: boolean; message: string }> => {
+    const response = await api.delete(`/database/backup/${jobId}`);
     return response.data;
   },
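Taken together, the new methods support a start/poll/download flow. A minimal usage sketch built only from the methods added above (error handling and UI state elided):

    // Starts a complete backup, waits for it to finish, and returns the
    // download URL for the finished archive.
    async function runCompleteBackup(): Promise<string> {
      const { jobId } = await databaseApi.backupComplete();

      for (;;) {
        const status = await databaseApi.getBackupStatus(jobId);
        if (status.status === 'COMPLETED') break;
        if (status.status === 'FAILED') throw new Error(status.errorMessage);
        await new Promise(resolve => setTimeout(resolve, 2000)); // matches the UI's 2s poll
      }

      return databaseApi.downloadBackup(jobId);
    }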