From 3dd2ff50d87ff36070030ad87225ec3973bacc70 Mon Sep 17 00:00:00 2001
From: Stefan Hardegger
Date: Mon, 20 Oct 2025 08:58:09 +0200
Subject: [PATCH 1/9] Fix for memory issue during backup

---
 .../service/DatabaseManagementService.java | 75 ++++++++++++++-----
 1 file changed, 55 insertions(+), 20 deletions(-)

diff --git a/backend/src/main/java/com/storycove/service/DatabaseManagementService.java b/backend/src/main/java/com/storycove/service/DatabaseManagementService.java
index 44446f0..547930a 100644
--- a/backend/src/main/java/com/storycove/service/DatabaseManagementService.java
+++ b/backend/src/main/java/com/storycove/service/DatabaseManagementService.java
@@ -7,7 +7,6 @@ import org.springframework.beans.factory.annotation.Qualifier;
 import org.springframework.beans.factory.annotation.Value;
 import org.springframework.context.ApplicationContext;
 import org.springframework.context.ApplicationContextAware;
-import org.springframework.core.io.ByteArrayResource;
 import org.springframework.core.io.Resource;
 import org.springframework.stereotype.Service;
 import org.springframework.transaction.annotation.Transactional;
@@ -141,26 +140,48 @@ public class DatabaseManagementService implements ApplicationContextAware {
 
     /**
      * Create a comprehensive backup including database and files in ZIP format
+     * Returns a streaming resource to avoid loading large backups into memory
      */
     public Resource createCompleteBackup() throws SQLException, IOException {
+        // Create temp file with deleteOnExit as safety net
         Path tempZip = Files.createTempFile("storycove-backup", ".zip");
-
+        tempZip.toFile().deleteOnExit();
+
         try (ZipOutputStream zipOut = new ZipOutputStream(Files.newOutputStream(tempZip))) {
             // 1. Add database dump
             addDatabaseDumpToZip(zipOut);
-
+
             // 2. Add all image files
             addFilesToZip(zipOut);
-
+
             // 3. Add metadata
             addMetadataToZip(zipOut);
         }
-
-        // Return the ZIP file as a resource
-        byte[] zipData = Files.readAllBytes(tempZip);
-        Files.deleteIfExists(tempZip);
-
-        return new ByteArrayResource(zipData);
+
+        // Return the ZIP file as a FileSystemResource for streaming
+        // This avoids loading the entire file into memory
+        return new org.springframework.core.io.FileSystemResource(tempZip.toFile()) {
+            @Override
+            public InputStream getInputStream() throws IOException {
+                // Wrap the input stream to delete the temp file after it's fully read
+                return new java.io.FilterInputStream(super.getInputStream()) {
+                    @Override
+                    public void close() throws IOException {
+                        try {
+                            super.close();
+                        } finally {
+                            // Clean up temp file after streaming is complete
+                            try {
+                                Files.deleteIfExists(tempZip);
+                            } catch (IOException e) {
+                                // Log but don't fail - deleteOnExit will handle it
+                                System.err.println("Warning: Could not delete temp backup file: " + e.getMessage());
+                            }
+                        }
+                    }
+                };
+            }
+        };
     }
 
     /**
@@ -289,20 +310,34 @@ public class DatabaseManagementService implements ApplicationContextAware {
 
             System.err.println("PostgreSQL backup completed successfully");
 
-            // Read the backup file into memory
-            byte[] backupData = Files.readAllBytes(tempBackupFile);
-            return new ByteArrayResource(backupData);
+            // Return the backup file as a streaming resource to avoid memory issues with large databases
+            tempBackupFile.toFile().deleteOnExit();
+            return new org.springframework.core.io.FileSystemResource(tempBackupFile.toFile()) {
+                @Override
+                public InputStream getInputStream() throws IOException {
+                    // Wrap the input stream to delete the temp file after it's fully read
+                    return new java.io.FilterInputStream(super.getInputStream()) {
+                        @Override
+                        public void close() throws IOException {
+                            try {
+                                super.close();
+                            } finally {
+                                // Clean up temp file after streaming is complete
+                                try {
+                                    Files.deleteIfExists(tempBackupFile);
+                                } catch (IOException e) {
+                                    // Log but don't fail - deleteOnExit will handle it
+                                    System.err.println("Warning: Could not delete temp backup file: " + e.getMessage());
+                                }
+                            }
+                        }
+                    };
+                }
+            };
 
         } catch (InterruptedException e) {
             Thread.currentThread().interrupt();
             throw new RuntimeException("Backup process was interrupted", e);
-        } finally {
-            // Clean up temporary file
-            try {
-                Files.deleteIfExists(tempBackupFile);
-            } catch (IOException e) {
-                System.err.println("Warning: Could not delete temporary backup file: " + e.getMessage());
-            }
         }
     }
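Patch 1 swaps ByteArrayResource (whole file buffered in heap) for a FileSystemResource whose stream deletes the temp file on close. The same anonymous subclass appears twice in this patch; if a third copy ever appears, it could be factored into a small named class. A minimal sketch of that refactor (the class name is hypothetical, not part of the patch):

    import java.io.FilterInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import org.springframework.core.io.FileSystemResource;

    // Hypothetical helper consolidating the duplicated anonymous classes above.
    class SelfDeletingFileSystemResource extends FileSystemResource {
        private final Path file;

        SelfDeletingFileSystemResource(Path file) {
            super(file.toFile());
            this.file = file;
        }

        @Override
        public InputStream getInputStream() throws IOException {
            return new FilterInputStream(super.getInputStream()) {
                @Override
                public void close() throws IOException {
                    try {
                        super.close();
                    } finally {
                        // Best effort; deleteOnExit remains the fallback.
                        Files.deleteIfExists(file);
                    }
                }
            };
        }
    }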
From c9d58173f3fee7ff1648b1dbe812de1b6a4ebcd2 Mon Sep 17 00:00:00 2001
From: Stefan Hardegger
Date: Mon, 20 Oct 2025 09:23:34 +0200
Subject: [PATCH 2/9] improved backup creation

---
 .../controller/DatabaseController.java        | 147 ++++++++++-
 .../java/com/storycove/entity/BackupJob.java  | 195 ++++++++++++++
 .../repository/BackupJobRepository.java       |  25 ++
 .../storycove/service/AsyncBackupService.java | 245 ++++++++++++++++++
 .../components/settings/SystemSettings.tsx    | 183 ++++++++++---
 frontend/src/lib/api.ts                       |  45 +++-
 6 files changed, 792 insertions(+), 48 deletions(-)
 create mode 100644 backend/src/main/java/com/storycove/entity/BackupJob.java
 create mode 100644 backend/src/main/java/com/storycove/repository/BackupJobRepository.java
 create mode 100644 backend/src/main/java/com/storycove/service/AsyncBackupService.java

diff --git a/backend/src/main/java/com/storycove/controller/DatabaseController.java b/backend/src/main/java/com/storycove/controller/DatabaseController.java
index e016bef..28906a8 100644
--- a/backend/src/main/java/com/storycove/controller/DatabaseController.java
+++ b/backend/src/main/java/com/storycove/controller/DatabaseController.java
@@ -1,6 +1,8 @@
 package com.storycove.controller;
 
+import com.storycove.service.AsyncBackupService;
 import com.storycove.service.DatabaseManagementService;
+import com.storycove.service.LibraryService;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.core.io.Resource;
 import org.springframework.http.HttpHeaders;
@@ -12,6 +14,7 @@ import org.springframework.web.multipart.MultipartFile;
 import java.io.IOException;
 import java.time.LocalDateTime;
 import java.time.format.DateTimeFormatter;
+import java.util.List;
 import java.util.Map;
 
 @RestController
@@ -21,6 +24,12 @@ public class DatabaseController {
 
     @Autowired
     private DatabaseManagementService databaseManagementService;
 
+    @Autowired
+    private AsyncBackupService asyncBackupService;
+
+    @Autowired
+    private LibraryService libraryService;
+
     @PostMapping("/backup")
     public ResponseEntity<Resource> backupDatabase() {
         try {
@@ -83,19 +92,141 @@
     }
 
     @PostMapping("/backup-complete")
-    public ResponseEntity<Resource> backupComplete() {
+    public ResponseEntity<Map<String, Object>> backupCompleteAsync() {
         try {
-            Resource backup = databaseManagementService.createCompleteBackup();
-
-            String timestamp = LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd_HH-mm-ss"));
-            String filename = "storycove_complete_backup_" + timestamp + ".zip";
-
+            String libraryId = libraryService.getCurrentLibraryId();
+            if (libraryId == null) {
+                return ResponseEntity.badRequest()
+                        .body(Map.of("success", false, "message", "No library selected"));
+            }
+
+            // Start backup job asynchronously
+            com.storycove.entity.BackupJob job = asyncBackupService.startBackupJob(
+                    libraryId,
+                    com.storycove.entity.BackupJob.BackupType.COMPLETE
+            );
+
+            return ResponseEntity.ok(Map.of(
+                    "success", true,
+                    "message", "Backup started",
+                    "jobId", job.getId().toString(),
+                    "status", job.getStatus().toString()
+            ));
+        } catch (Exception e) {
+            return ResponseEntity.internalServerError()
+                    .body(Map.of("success", false, "message", "Failed to start backup: " + e.getMessage()));
+        }
+    }
+
+    @GetMapping("/backup-status/{jobId}")
+    public ResponseEntity<Map<String, Object>> getBackupStatus(@PathVariable String jobId) {
+        try {
+            java.util.UUID uuid = java.util.UUID.fromString(jobId);
+            java.util.Optional<com.storycove.entity.BackupJob> jobOpt = asyncBackupService.getJobStatus(uuid);
+
+            if (jobOpt.isEmpty()) {
+                return ResponseEntity.notFound().build();
+            }
+
+            com.storycove.entity.BackupJob job = jobOpt.get();
+
+            return ResponseEntity.ok(Map.of(
+                    "success", true,
+                    "jobId", job.getId().toString(),
+                    "status", job.getStatus().toString(),
+                    "progress", job.getProgressPercent(),
+                    "fileSizeBytes", job.getFileSizeBytes() != null ? job.getFileSizeBytes() : 0,
+                    "createdAt", job.getCreatedAt().toString(),
+                    "completedAt", job.getCompletedAt() != null ? job.getCompletedAt().toString() : "",
+                    "errorMessage", job.getErrorMessage() != null ? job.getErrorMessage() : ""
+            ));
+        } catch (IllegalArgumentException e) {
+            return ResponseEntity.badRequest()
+                    .body(Map.of("success", false, "message", "Invalid job ID"));
+        }
+    }
+
+    @GetMapping("/backup-download/{jobId}")
+    public ResponseEntity<Resource> downloadBackup(@PathVariable String jobId) {
+        try {
+            java.util.UUID uuid = java.util.UUID.fromString(jobId);
+            Resource backup = asyncBackupService.getBackupFile(uuid);
+
+            java.util.Optional<com.storycove.entity.BackupJob> jobOpt = asyncBackupService.getJobStatus(uuid);
+            if (jobOpt.isEmpty()) {
+                return ResponseEntity.notFound().build();
+            }
+
+            com.storycove.entity.BackupJob job = jobOpt.get();
+            String timestamp = job.getCreatedAt().format(DateTimeFormatter.ofPattern("yyyy-MM-dd_HH-mm-ss"));
+            String extension = job.getType() == com.storycove.entity.BackupJob.BackupType.COMPLETE ? "zip" : "sql";
+            String filename = "storycove_backup_" + timestamp + "." + extension;
+
             return ResponseEntity.ok()
                     .header(HttpHeaders.CONTENT_DISPOSITION, "attachment; filename=\"" + filename + "\"")
-                    .header(HttpHeaders.CONTENT_TYPE, "application/zip")
+                    .header(HttpHeaders.CONTENT_TYPE,
+                            job.getType() == com.storycove.entity.BackupJob.BackupType.COMPLETE
+                                    ? "application/zip"
+                                    : "application/sql")
                     .body(backup);
+        } catch (IllegalArgumentException e) {
+            return ResponseEntity.badRequest().build();
         } catch (Exception e) {
-            throw new RuntimeException("Failed to create complete backup: " + e.getMessage(), e);
+            throw new RuntimeException("Failed to download backup: " + e.getMessage(), e);
+        }
+    }
+
+    @GetMapping("/backup-list")
+    public ResponseEntity<Map<String, Object>> listBackups() {
+        try {
+            String libraryId = libraryService.getCurrentLibraryId();
+            if (libraryId == null) {
+                return ResponseEntity.badRequest()
+                        .body(Map.of("success", false, "message", "No library selected"));
+            }
+
+            List<com.storycove.entity.BackupJob> jobs = asyncBackupService.listBackupJobs(libraryId);
+
+            List<Map<String, Object>> jobsList = jobs.stream()
+                    .map(job -> {
+                        Map<String, Object> jobMap = new java.util.HashMap<>();
+                        jobMap.put("jobId", job.getId().toString());
+                        jobMap.put("type", job.getType().toString());
+                        jobMap.put("status", job.getStatus().toString());
+                        jobMap.put("progress", job.getProgressPercent());
+                        jobMap.put("fileSizeBytes", job.getFileSizeBytes() != null ? job.getFileSizeBytes() : 0L);
+                        jobMap.put("createdAt", job.getCreatedAt().toString());
+                        jobMap.put("completedAt", job.getCompletedAt() != null ? job.getCompletedAt().toString() : "");
+                        return jobMap;
+                    })
+                    .collect(java.util.stream.Collectors.toList());
+
+            return ResponseEntity.ok(Map.of(
+                    "success", true,
+                    "backups", jobsList
+            ));
+        } catch (Exception e) {
+            return ResponseEntity.internalServerError()
+                    .body(Map.of("success", false, "message", "Failed to list backups: " + e.getMessage()));
+        }
+    }
+
+    @DeleteMapping("/backup/{jobId}")
+    public ResponseEntity<Map<String, Object>> deleteBackup(@PathVariable String jobId) {
+        try {
+            java.util.UUID uuid = java.util.UUID.fromString(jobId);
+            asyncBackupService.deleteBackupJob(uuid);
+
+            return ResponseEntity.ok(Map.of(
+                    "success", true,
+                    "message", "Backup deleted successfully"
+            ));
+        } catch (IllegalArgumentException e) {
+            return ResponseEntity.badRequest()
+                    .body(Map.of("success", false, "message", "Invalid job ID"));
+        } catch (Exception e) {
+            return ResponseEntity.internalServerError()
+                    .body(Map.of("success", false, "message", "Failed to delete backup: " + e.getMessage()));
+        }
+    }

diff --git a/backend/src/main/java/com/storycove/entity/BackupJob.java b/backend/src/main/java/com/storycove/entity/BackupJob.java
new file mode 100644
index 0000000..189b62c
--- /dev/null
+++ b/backend/src/main/java/com/storycove/entity/BackupJob.java
@@ -0,0 +1,195 @@
+package com.storycove.entity;
+
+import jakarta.persistence.*;
+import java.time.LocalDateTime;
+import java.util.UUID;
+
+@Entity
+@Table(name = "backup_jobs")
+public class BackupJob {
+
+    @Id
+    @GeneratedValue(strategy = GenerationType.UUID)
+    private UUID id;
+
+    @Column(nullable = false)
+    private String libraryId;
+
+    @Column(nullable = false)
+    @Enumerated(EnumType.STRING)
+    private BackupType type;
+
+    @Column(nullable = false)
+    @Enumerated(EnumType.STRING)
+    private BackupStatus status;
+
+    @Column
+    private String filePath;
+
+    @Column
+    private Long fileSizeBytes;
+
+    @Column
+    private Integer progressPercent;
+
+    @Column(length = 1000)
+    private String errorMessage;
+
+    @Column(nullable = false)
+    private LocalDateTime createdAt;
+
+    @Column
+    private LocalDateTime startedAt;
+
+    @Column
+    private LocalDateTime completedAt;
+
+    @Column
+    private LocalDateTime expiresAt;
+
+    @PrePersist
+    protected void onCreate() {
+        createdAt = LocalDateTime.now();
+        // Backups expire after 24 hours
+        expiresAt = LocalDateTime.now().plusDays(1);
+    }
+
+    // Enums
+    public enum BackupType {
+        DATABASE_ONLY,
+        COMPLETE
+    }
+
+    public enum BackupStatus {
+        PENDING,
+        IN_PROGRESS,
+        COMPLETED,
+        FAILED,
+        EXPIRED
+    }
+
+    // Constructors
+    public BackupJob() {
+    }
+
+    public BackupJob(String libraryId, BackupType type) {
+        this.libraryId = libraryId;
+        this.type = type;
+        this.status = BackupStatus.PENDING;
+        this.progressPercent = 0;
+    }
+
+    // Getters and Setters
+    public UUID getId() {
+        return id;
+    }
+
+    public void setId(UUID id) {
+        this.id = id;
+    }
+
+    public String getLibraryId() {
+        return libraryId;
+    }
+
+    public void setLibraryId(String libraryId) {
+        this.libraryId = libraryId;
+    }
+
+    public BackupType getType() {
+        return type;
+    }
+
+    public void setType(BackupType type) {
+        this.type = type;
+    }
+
+    public BackupStatus getStatus() {
+        return status;
+    }
+
+    public void setStatus(BackupStatus status) {
+        this.status = status;
+    }
+
+    public String getFilePath() {
+        return filePath;
+    }
+
+    public void setFilePath(String filePath) {
+        this.filePath = filePath;
+    }
+
+    public Long getFileSizeBytes() {
+        return fileSizeBytes;
+    }
+
+    public void setFileSizeBytes(Long fileSizeBytes) {
+        this.fileSizeBytes = fileSizeBytes;
+    }
+
+    public Integer getProgressPercent() {
+        return progressPercent;
+    }
+
+    public void setProgressPercent(Integer progressPercent) {
+        this.progressPercent = progressPercent;
+    }
+
+    public String getErrorMessage() {
+        return errorMessage;
+    }
+
+    public void setErrorMessage(String errorMessage) {
+        this.errorMessage = errorMessage;
+    }
+
+    public LocalDateTime getCreatedAt() {
+        return createdAt;
+    }
+
+    public void setCreatedAt(LocalDateTime createdAt) {
+        this.createdAt = createdAt;
+    }
+
+    public LocalDateTime getStartedAt() {
+        return startedAt;
+    }
+
+    public void setStartedAt(LocalDateTime startedAt) {
+        this.startedAt = startedAt;
+    }
+
+    public LocalDateTime getCompletedAt() {
+        return completedAt;
+    }
+
+    public void setCompletedAt(LocalDateTime completedAt) {
+        this.completedAt = completedAt;
+    }
+
+    public LocalDateTime getExpiresAt() {
+        return expiresAt;
+    }
+
+    public void setExpiresAt(LocalDateTime expiresAt) {
+        this.expiresAt = expiresAt;
+    }
+
+    // Helper methods
+    public boolean isExpired() {
+        return LocalDateTime.now().isAfter(expiresAt);
+    }
+
+    public boolean isCompleted() {
+        return status == BackupStatus.COMPLETED;
+    }
+
+    public boolean isFailed() {
+        return status == BackupStatus.FAILED;
+    }
+
+    public boolean isInProgress() {
+        return status == BackupStatus.IN_PROGRESS;
+    }
+}

diff --git a/backend/src/main/java/com/storycove/repository/BackupJobRepository.java b/backend/src/main/java/com/storycove/repository/BackupJobRepository.java
new file mode 100644
index 0000000..86cfe46
--- /dev/null
+++ b/backend/src/main/java/com/storycove/repository/BackupJobRepository.java
@@ -0,0 +1,25 @@
+package com.storycove.repository;
+
+import com.storycove.entity.BackupJob;
+import org.springframework.data.jpa.repository.JpaRepository;
+import org.springframework.data.jpa.repository.Modifying;
+import org.springframework.data.jpa.repository.Query;
+import org.springframework.data.repository.query.Param;
+import org.springframework.stereotype.Repository;
+
+import java.time.LocalDateTime;
+import java.util.List;
+import java.util.UUID;
+
+@Repository
+public interface BackupJobRepository extends JpaRepository<BackupJob, UUID> {
+
+    List<BackupJob> findByLibraryIdOrderByCreatedAtDesc(String libraryId);
+
+    @Query("SELECT bj FROM BackupJob bj WHERE bj.expiresAt < :now AND bj.status = 'COMPLETED'")
+    List<BackupJob> findExpiredJobs(@Param("now") LocalDateTime now);
+
+    @Modifying
+    @Query("UPDATE BackupJob bj SET bj.status = 'EXPIRED' WHERE bj.expiresAt < :now AND bj.status = 'COMPLETED'")
+    int markExpiredJobs(@Param("now") LocalDateTime now);
+}

diff --git a/backend/src/main/java/com/storycove/service/AsyncBackupService.java b/backend/src/main/java/com/storycove/service/AsyncBackupService.java
new file mode 100644
index 0000000..0570375
--- /dev/null
+++ b/backend/src/main/java/com/storycove/service/AsyncBackupService.java
@@ -0,0 +1,245 @@
+package com.storycove.service;
+
+import com.storycove.entity.BackupJob;
+import com.storycove.repository.BackupJobRepository;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.core.io.FileSystemResource;
+import org.springframework.core.io.Resource;
+import org.springframework.scheduling.annotation.Async;
+import org.springframework.scheduling.annotation.Scheduled;
+import org.springframework.stereotype.Service;
+import org.springframework.transaction.annotation.Transactional;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.sql.SQLException;
+import java.time.LocalDateTime;
+import java.util.List;
+import java.util.Optional;
+import java.util.UUID;
+
+@Service
+public class AsyncBackupService {
+
+    private static final Logger logger = LoggerFactory.getLogger(AsyncBackupService.class);
+
+    @Value("${storycove.upload.dir:/app/images}")
+    private String uploadDir;
+
+    @Autowired
+    private BackupJobRepository backupJobRepository;
+
+    @Autowired
+    private DatabaseManagementService databaseManagementService;
+
+    @Autowired
+    private LibraryService libraryService;
+
+    /**
+     * Start a backup job asynchronously
+     */
+    @Transactional
+    public BackupJob startBackupJob(String libraryId, BackupJob.BackupType type) {
+        BackupJob job = new BackupJob(libraryId, type);
+        job = backupJobRepository.save(job);
+
+        // Start backup in background
+        executeBackupAsync(job.getId());
+
+        return job;
+    }
+
+    /**
+     * Execute backup asynchronously
+     */
+    @Async
+    @Transactional
+    public void executeBackupAsync(UUID jobId) {
+        Optional<BackupJob> jobOpt = backupJobRepository.findById(jobId);
+        if (jobOpt.isEmpty()) {
+            logger.error("Backup job not found: {}", jobId);
+            return;
+        }
+
+        BackupJob job = jobOpt.get();
+        job.setStatus(BackupJob.BackupStatus.IN_PROGRESS);
+        job.setStartedAt(LocalDateTime.now());
+        job.setProgressPercent(0);
+        backupJobRepository.save(job);
+
+        try {
+            logger.info("Starting backup job {} for library {}", job.getId(), job.getLibraryId());
+
+            // Switch to the correct library
+            if (!job.getLibraryId().equals(libraryService.getCurrentLibraryId())) {
+                libraryService.switchToLibraryAfterAuthentication(job.getLibraryId());
+            }
+
+            // Create backup file
+            Path backupDir = Paths.get(uploadDir, "backups", job.getLibraryId());
+            Files.createDirectories(backupDir);
+
+            String filename = String.format("backup_%s_%s.%s",
+                    job.getId().toString(),
+                    LocalDateTime.now().toString().replaceAll(":", "-"),
+                    job.getType() == BackupJob.BackupType.COMPLETE ? "zip" : "sql");
+
+            Path backupFile = backupDir.resolve(filename);
+
+            job.setProgressPercent(10);
+            backupJobRepository.save(job);
+
+            // Create the backup
+            Resource backupResource;
+            if (job.getType() == BackupJob.BackupType.COMPLETE) {
+                backupResource = databaseManagementService.createCompleteBackup();
+            } else {
+                backupResource = databaseManagementService.createBackup();
+            }
+
+            job.setProgressPercent(80);
+            backupJobRepository.save(job);
+
+            // Copy resource to permanent file
+            try (var inputStream = backupResource.getInputStream();
+                 var outputStream = Files.newOutputStream(backupFile)) {
+                inputStream.transferTo(outputStream);
+            }
+
+            job.setProgressPercent(95);
+            backupJobRepository.save(job);
+
+            // Set file info
+            job.setFilePath(backupFile.toString());
+            job.setFileSizeBytes(Files.size(backupFile));
+            job.setStatus(BackupJob.BackupStatus.COMPLETED);
+            job.setCompletedAt(LocalDateTime.now());
+            job.setProgressPercent(100);
+
+            logger.info("Backup job {} completed successfully. File size: {} bytes",
+                    job.getId(), job.getFileSizeBytes());
+
+        } catch (Exception e) {
+            logger.error("Backup job {} failed", job.getId(), e);
+            job.setStatus(BackupJob.BackupStatus.FAILED);
+            job.setErrorMessage(e.getMessage());
+            job.setCompletedAt(LocalDateTime.now());
+        } finally {
+            backupJobRepository.save(job);
+        }
+    }
+
+    /**
+     * Get backup job status
+     */
+    public Optional<BackupJob> getJobStatus(UUID jobId) {
+        return backupJobRepository.findById(jobId);
+    }
+
+    /**
+     * Get backup file for download
+     */
+    public Resource getBackupFile(UUID jobId) throws IOException {
+        Optional<BackupJob> jobOpt = backupJobRepository.findById(jobId);
+        if (jobOpt.isEmpty()) {
+            throw new IOException("Backup job not found");
+        }
+
+        BackupJob job = jobOpt.get();
+
+        if (!job.isCompleted()) {
+            throw new IOException("Backup is not completed yet");
+        }
+
+        if (job.isExpired()) {
+            throw new IOException("Backup has expired");
+        }
+
+        if (job.getFilePath() == null) {
+            throw new IOException("Backup file path not set");
+        }
+
+        Path backupPath = Paths.get(job.getFilePath());
+        if (!Files.exists(backupPath)) {
+            throw new IOException("Backup file not found");
+        }
+
+        return new FileSystemResource(backupPath);
+    }
+
+    /**
+     * List backup jobs for a library
+     */
+    public List<BackupJob> listBackupJobs(String libraryId) {
+        return backupJobRepository.findByLibraryIdOrderByCreatedAtDesc(libraryId);
+    }
+
+    /**
+     * Clean up expired backup jobs and their files
+     * Runs daily at 2 AM
+     */
+    @Scheduled(cron = "0 0 2 * * ?")
+    @Transactional
+    public void cleanupExpiredBackups() {
+        logger.info("Starting cleanup of expired backups");
+
+        LocalDateTime now = LocalDateTime.now();
+
+        // Mark expired jobs
+        int markedCount = backupJobRepository.markExpiredJobs(now);
+        logger.info("Marked {} jobs as expired", markedCount);
+
+        // Find all expired jobs to delete their files
+        List<BackupJob> expiredJobs = backupJobRepository.findExpiredJobs(now);
+
+        for (BackupJob job : expiredJobs) {
+            if (job.getFilePath() != null) {
+                try {
+                    Path filePath = Paths.get(job.getFilePath());
+                    if (Files.exists(filePath)) {
+                        Files.delete(filePath);
+                        logger.info("Deleted expired backup file: {}", filePath);
+                    }
+                } catch (IOException e) {
+                    logger.warn("Failed to delete expired backup file: {}", job.getFilePath(), e);
+                }
+            }
+
+            // Delete the job record
+            backupJobRepository.delete(job);
+        }
+
+        logger.info("Cleanup completed. Deleted {} expired backups", expiredJobs.size());
+    }
+
+    /**
+     * Delete a specific backup job and its file
+     */
+    @Transactional
+    public void deleteBackupJob(UUID jobId) throws IOException {
+        Optional<BackupJob> jobOpt = backupJobRepository.findById(jobId);
+        if (jobOpt.isEmpty()) {
+            throw new IOException("Backup job not found");
+        }
+
+        BackupJob job = jobOpt.get();
+
+        // Delete file if it exists
+        if (job.getFilePath() != null) {
+            Path filePath = Paths.get(job.getFilePath());
+            if (Files.exists(filePath)) {
+                Files.delete(filePath);
+                logger.info("Deleted backup file: {}", filePath);
+            }
+        }
+
+        // Delete job record
+        backupJobRepository.delete(job);
+        logger.info("Deleted backup job: {}", jobId);
+    }
+}
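Taken together, the controller changes above turn one long-running download into a three-step start/poll/download protocol. A sketch of the flow from the command line (host, port, and authentication handling are assumptions; jq is used only to pluck fields from the JSON responses):

    # Start a complete backup; the response carries the job id.
    JOB_ID=$(curl -s -X POST http://localhost:8080/api/database/backup-complete | jq -r '.jobId')

    # Poll until status is COMPLETED (the UI below does this every 2 seconds).
    curl -s "http://localhost:8080/api/database/backup-status/$JOB_ID" | jq '{status, progress}'

    # Download; -OJ honors the server's Content-Disposition filename.
    curl -OJ "http://localhost:8080/api/database/backup-download/$JOB_ID"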
diff --git a/frontend/src/components/settings/SystemSettings.tsx b/frontend/src/components/settings/SystemSettings.tsx
index 0df0f6e..3bef91c 100644
--- a/frontend/src/components/settings/SystemSettings.tsx
+++ b/frontend/src/components/settings/SystemSettings.tsx
@@ -33,11 +33,18 @@ export default function SystemSettings({}: SystemSettingsProps) {
   });
 
   const [databaseStatus, setDatabaseStatus] = useState<{
-    completeBackup: { loading: boolean; message: string; success?: boolean };
+    completeBackup: {
+      loading: boolean;
+      message: string;
+      success?: boolean;
+      jobId?: string;
+      progress?: number;
+      downloadReady?: boolean;
+    };
     completeRestore: { loading: boolean; message: string; success?: boolean };
     completeClear: { loading: boolean; message: string; success?: boolean };
   }>({
-    completeBackup: { loading: false, message: '' },
+    completeBackup: { loading: false, message: '', progress: 0 },
     completeRestore: { loading: false, message: '' },
     completeClear: { loading: false, message: '' }
   });
@@ -73,43 +80,117 @@
   const handleCompleteBackup = async () => {
     setDatabaseStatus(prev => ({
       ...prev,
-      completeBackup: { loading: true, message: 'Creating complete backup...', success: undefined }
+      completeBackup: { loading: true, message: 'Starting backup...', success: undefined, progress: 0, downloadReady: false }
     }));
 
     try {
-      const backupBlob = await databaseApi.backupComplete();
-
-      // Create download link
-      const url = window.URL.createObjectURL(backupBlob);
-      const link = document.createElement('a');
-      link.href = url;
-
-      const timestamp = new Date().toISOString().replace(/[:.]/g, '-').slice(0, 19);
-      link.download = `storycove_complete_backup_${timestamp}.zip`;
-
-      document.body.appendChild(link);
-      link.click();
-      document.body.removeChild(link);
-      window.URL.revokeObjectURL(url);
+      // Start the async backup job
+      const startResponse = await databaseApi.backupComplete();
+      const jobId = startResponse.jobId;
 
       setDatabaseStatus(prev => ({
         ...prev,
-        completeBackup: { loading: false, message: 'Complete backup downloaded successfully', success: true }
+        completeBackup: { ...prev.completeBackup, jobId, message: 'Backup in progress...' }
       }));
+
+      // Poll for progress
+      const pollInterval = setInterval(async () => {
+        try {
+          const status = await databaseApi.getBackupStatus(jobId);
+
+          if (status.status === 'COMPLETED') {
+            clearInterval(pollInterval);
+            setDatabaseStatus(prev => ({
+              ...prev,
+              completeBackup: {
+                loading: false,
+                message: 'Backup completed! Ready to download.',
+                success: true,
+                jobId,
+                progress: 100,
+                downloadReady: true
+              }
+            }));
+
+            // Clear message after 30 seconds (keep download button visible)
+            setTimeout(() => {
+              setDatabaseStatus(prev => ({
+                ...prev,
+                completeBackup: { ...prev.completeBackup, message: '' }
+              }));
+            }, 30000);
+          } else if (status.status === 'FAILED') {
+            clearInterval(pollInterval);
+            setDatabaseStatus(prev => ({
+              ...prev,
+              completeBackup: {
+                loading: false,
+                message: `Backup failed: ${status.errorMessage}`,
+                success: false,
+                progress: 0,
+                downloadReady: false
+              }
+            }));
+          } else {
+            // Update progress
+            setDatabaseStatus(prev => ({
+              ...prev,
+              completeBackup: {
+                ...prev.completeBackup,
+                progress: status.progress,
+                message: `Creating backup... ${status.progress}%`
+              }
+            }));
+          }
+        } catch (pollError: any) {
+          clearInterval(pollInterval);
+          setDatabaseStatus(prev => ({
+            ...prev,
+            completeBackup: {
+              loading: false,
+              message: `Failed to check backup status: ${pollError.message}`,
+              success: false,
+              progress: 0,
+              downloadReady: false
+            }
+          }));
+        }
+      }, 2000); // Poll every 2 seconds
+
     } catch (error: any) {
       setDatabaseStatus(prev => ({
         ...prev,
-        completeBackup: { loading: false, message: error.message || 'Complete backup failed', success: false }
+        completeBackup: {
+          loading: false,
+          message: error.message || 'Failed to start backup',
+          success: false,
+          progress: 0,
+          downloadReady: false
+        }
       }));
     }
+  };
 
-    // Clear message after 5 seconds
-    setTimeout(() => {
-      setDatabaseStatus(prev => ({
-        ...prev,
-        completeBackup: { loading: false, message: '', success: undefined }
-      }));
-    }, 5000);
+  const handleDownloadBackup = (jobId: string) => {
+    const downloadUrl = databaseApi.downloadBackup(jobId);
+    const link = document.createElement('a');
+    link.href = downloadUrl;
+    link.download = ''; // Filename will be set by server
+    document.body.appendChild(link);
+    link.click();
+    document.body.removeChild(link);
+
+    // Clear the download ready state after download
+    setDatabaseStatus(prev => ({
+      ...prev,
+      completeBackup: {
+        loading: false,
+        message: 'Backup downloaded successfully',
+        success: true,
+        progress: 100,
+        downloadReady: false
+      }
+    }));
   };
 
   const handleCompleteRestore = async (event: React.ChangeEvent<HTMLInputElement>) => {
@@ -792,20 +873,50 @@
 [The remainder of this hunk is the backup card's JSX, whose element tags did not survive extraction. What remains recoverable: a description paragraph — "Download a complete backup as a ZIP file. This includes your database AND all uploaded files (cover images, avatars). This is a comprehensive backup of your entire StoryCove installation." — a "Create Complete Backup" button wired to handleCompleteBackup, a "Download Backup" button rendered only when databaseStatus.completeBackup.downloadReady && databaseStatus.completeBackup.jobId (calling handleDownloadBackup), a progress bar labeled "Progress" bound to databaseStatus.completeBackup.progress while the backup is loading, and a status line showing databaseStatus.completeBackup.message.]
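One review note on the polling above: the interval is cleared on COMPLETED, FAILED, or a poll error, but not if the component unmounts mid-backup. A hedged sketch of tying the poller to the component lifecycle (names hypothetical; assumes useRef and useEffect are imported in this component):

    // Keep the interval handle in a ref so cleanup can reach it;
    // set pollRef.current = pollInterval where setInterval is called.
    const pollRef = useRef<ReturnType<typeof setInterval> | null>(null);

    useEffect(() => {
      // Stop polling when the settings panel unmounts.
      return () => {
        if (pollRef.current) clearInterval(pollRef.current);
      };
    }, []);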
diff --git a/frontend/src/lib/api.ts b/frontend/src/lib/api.ts
index bbd57ec..80859aa 100644
--- a/frontend/src/lib/api.ts
+++ b/frontend/src/lib/api.ts
@@ -1013,10 +1013,47 @@ export const databaseApi = {
     return response.data;
   },
 
-  backupComplete: async (): Promise<Blob> => {
-    const response = await api.post('/database/backup-complete', {}, {
-      responseType: 'blob'
-    });
+  backupComplete: async (): Promise<{ success: boolean; jobId: string; status: string; message: string }> => {
+    const response = await api.post('/database/backup-complete');
+    return response.data;
+  },
+
+  getBackupStatus: async (jobId: string): Promise<{
+    success: boolean;
+    jobId: string;
+    status: string;
+    progress: number;
+    fileSizeBytes: number;
+    createdAt: string;
+    completedAt: string;
+    errorMessage: string;
+  }> => {
+    const response = await api.get(`/database/backup-status/${jobId}`);
+    return response.data;
+  },
+
+  downloadBackup: (jobId: string): string => {
+    return `/api/database/backup-download/${jobId}`;
+  },
+
+  listBackups: async (): Promise<{
+    success: boolean;
+    backups: Array<{
+      jobId: string;
+      type: string;
+      status: string;
+      progress: number;
+      fileSizeBytes: number;
+      createdAt: string;
+      completedAt: string;
+    }>;
+  }> => {
+    const response = await api.get('/database/backup-list');
+    return response.data;
+  },
+
+  deleteBackup: async (jobId: string): Promise<{ success: boolean; message: string }> => {
+    const response = await api.delete(`/database/backup/${jobId}`);
     return response.data;
   },
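With these api.ts additions, the intended client flow is: start, poll, then navigate to the download URL. A compressed sketch of a caller (error handling omitted; this mirrors what SystemSettings does with setInterval):

    const { jobId } = await databaseApi.backupComplete();
    let status = await databaseApi.getBackupStatus(jobId);
    while (status.status !== 'COMPLETED' && status.status !== 'FAILED') {
      await new Promise(r => setTimeout(r, 2000));
      status = await databaseApi.getBackupStatus(jobId);
    }
    if (status.status === 'COMPLETED') {
      window.location.href = databaseApi.downloadBackup(jobId); // browser handles the attachment
    }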
From 6a38189ef0d6e4bdef1d2910ce17a50b64cb5795 Mon Sep 17 00:00:00 2001
From: Stefan Hardegger
Date: Mon, 20 Oct 2025 12:30:28 +0200
Subject: [PATCH 3/9] fix images

---
 backend/apply_backup_jobs_migration.sh          | 54 +++++++++++++++++++
 backend/create_backup_jobs_table.sql            | 29 ++++++++++
 deploy.sh                                       | 19 +++++++
 .../src/components/stories/SlateEditor.tsx     |  7 +--
 4 files changed, 106 insertions(+), 3 deletions(-)
 create mode 100755 backend/apply_backup_jobs_migration.sh
 create mode 100644 backend/create_backup_jobs_table.sql

diff --git a/backend/apply_backup_jobs_migration.sh b/backend/apply_backup_jobs_migration.sh
new file mode 100755
index 0000000..afc4e0f
--- /dev/null
+++ b/backend/apply_backup_jobs_migration.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+# Script to apply backup_jobs table migration to all library databases
+# This should be run from the backend directory
+
+set -e
+
+# Use full docker path
+DOCKER="/usr/local/bin/docker"
+
+echo "Applying backup_jobs table migration..."
+
+# Get database connection details from environment or use defaults
+DB_HOST="${POSTGRES_HOST:-postgres}"
+DB_PORT="${POSTGRES_PORT:-5432}"
+DB_USER="${POSTGRES_USER:-storycove}"
+DB_PASSWORD="${POSTGRES_PASSWORD:-password}"
+
+# List of databases to update
+DATABASES=("storycove" "storycove_afterdark")
+
+for DB_NAME in "${DATABASES[@]}"; do
+    echo ""
+    echo "Applying migration to database: $DB_NAME"
+
+    # Check if database exists
+    if $DOCKER exec storycove-postgres-1 psql -U "$DB_USER" -lqt | cut -d \| -f 1 | grep -qw "$DB_NAME"; then
+        echo "Database $DB_NAME exists, applying migration..."
+
+        # Apply migration
+        $DOCKER exec -i storycove-postgres-1 psql -U "$DB_USER" -d "$DB_NAME" < create_backup_jobs_table.sql
+
+        if [ $? -eq 0 ]; then
+            echo "✓ Migration applied successfully to $DB_NAME"
+        else
+            echo "✗ Failed to apply migration to $DB_NAME"
+            exit 1
+        fi
+    else
+        echo "⚠ Database $DB_NAME does not exist, skipping..."
+    fi
+done
+
+echo ""
+echo "Migration complete!"
+echo ""
+echo "Verifying table creation..."
+
+for DB_NAME in "${DATABASES[@]}"; do
+    if $DOCKER exec storycove-postgres-1 psql -U "$DB_USER" -lqt | cut -d \| -f 1 | grep -qw "$DB_NAME"; then
+        echo ""
+        echo "Checking $DB_NAME:"
+        $DOCKER exec storycove-postgres-1 psql -U "$DB_USER" -d "$DB_NAME" -c "\d backup_jobs" 2>/dev/null || echo "  Table not found in $DB_NAME"
+    fi
+done

diff --git a/backend/create_backup_jobs_table.sql b/backend/create_backup_jobs_table.sql
new file mode 100644
index 0000000..41de698
--- /dev/null
+++ b/backend/create_backup_jobs_table.sql
@@ -0,0 +1,29 @@
+-- Create backup_jobs table for async backup job tracking
+-- This should be run on all library databases (default and afterdark)
+
+CREATE TABLE IF NOT EXISTS backup_jobs (
+    id UUID PRIMARY KEY,
+    library_id VARCHAR(255) NOT NULL,
+    type VARCHAR(50) NOT NULL CHECK (type IN ('DATABASE_ONLY', 'COMPLETE')),
+    status VARCHAR(50) NOT NULL CHECK (status IN ('PENDING', 'IN_PROGRESS', 'COMPLETED', 'FAILED', 'EXPIRED')),
+    file_path VARCHAR(1000),
+    file_size_bytes BIGINT,
+    progress_percent INTEGER,
+    error_message VARCHAR(1000),
+    created_at TIMESTAMP NOT NULL,
+    started_at TIMESTAMP,
+    completed_at TIMESTAMP,
+    expires_at TIMESTAMP
+);
+
+-- Create index on library_id for faster lookups
+CREATE INDEX IF NOT EXISTS idx_backup_jobs_library_id ON backup_jobs(library_id);
+
+-- Create index on status for cleanup queries
+CREATE INDEX IF NOT EXISTS idx_backup_jobs_status ON backup_jobs(status);
+
+-- Create index on expires_at for cleanup queries
+CREATE INDEX IF NOT EXISTS idx_backup_jobs_expires_at ON backup_jobs(expires_at);
+
+-- Create index on created_at for ordering
+CREATE INDEX IF NOT EXISTS idx_backup_jobs_created_at ON backup_jobs(created_at DESC);

diff --git a/deploy.sh b/deploy.sh
index d1a93fc..6af684b 100755
--- a/deploy.sh
+++ b/deploy.sh
@@ -55,6 +55,25 @@
 if [ $RETRY_COUNT -eq $MAX_RETRIES ]; then
     exit 1
 fi
 
+# Apply database migrations
+echo -e "${YELLOW}🗄️ Applying database migrations...${NC}"
+if [ -f "backend/create_backup_jobs_table.sql" ]; then
+    echo "Applying backup_jobs table migration..."
+
+    # Get list of databases
+    DATABASES=$(docker-compose exec -T postgres psql -U storycove -lqt | cut -d \| -f 1 | grep -E '^ storycove' | sed 's/^[ \t]*//')
+
+    # Apply migration to each database
+    for DB_NAME in $DATABASES; do
+        echo "  - Applying to database: $DB_NAME"
+        docker-compose exec -T postgres psql -U storycove -d "$DB_NAME" < backend/create_backup_jobs_table.sql 2>&1 | grep -E "(CREATE|ERROR)" || true
+    done
+
+    echo -e "${GREEN}✅ Database migrations applied${NC}"
+else
+    echo -e "${YELLOW}⚠️ No migration files found, skipping...${NC}"
+fi
+
 # Check if Solr is ready
 echo -e "${YELLOW}🔍 Checking Solr health...${NC}"
 RETRY_COUNT=0

diff --git a/frontend/src/components/stories/SlateEditor.tsx b/frontend/src/components/stories/SlateEditor.tsx
index b60c848..da1a0da 100644
--- a/frontend/src/components/stories/SlateEditor.tsx
+++ b/frontend/src/components/stories/SlateEditor.tsx
@@ -114,9 +114,10 @@ const htmlToSlate = (html: string): Descendant[] => {
         const img = element as HTMLImageElement;
         results.push({
           type: 'image',
-          src: img.src || img.getAttribute('src') || '',
-          alt: img.alt || img.getAttribute('alt') || '',
-          caption: img.title || img.getAttribute('title') || '',
+          // Use getAttribute to preserve relative URLs instead of .src which converts to absolute
+          src: img.getAttribute('src') || '',
+          alt: img.getAttribute('alt') || '',
+          caption: img.getAttribute('title') || '',
           children: [{ text: '' }] // Images need children in Slate
         });
         break;
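The SlateEditor change is subtle but important: the `src` IDL property resolves the URL against the document base, while `getAttribute('src')` returns the attribute exactly as authored. For example, on a page served from https://example.com/stories/42:

    const img = document.createElement('img');
    img.setAttribute('src', '/images/cover.png');
    img.src;                 // "https://example.com/images/cover.png" (absolute, resolved)
    img.getAttribute('src'); // "/images/cover.png" (relative, as written)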
+echo "" + +# Verify tables exist +for DB in storycove storycove_afterdark storycove_clas storycove_secret; do + echo "Checking $DB:" + docker-compose exec -T postgres psql -U storycove -d "$DB" -c "\d backup_jobs" 2>&1 | grep -E "Table|does not exist" || echo " ✓ Table exists" + echo "" +done diff --git a/docker-compose.yml b/docker-compose.yml index d1614cf..72f079c 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -66,6 +66,34 @@ services: networks: - storycove-network + migrations: + image: postgres:15-alpine + depends_on: + - postgres + volumes: + - ./backend/create_backup_jobs_table.sql:/migrations/create_backup_jobs_table.sql:ro + networks: + - storycove-network + entrypoint: /bin/sh + command: > + -c " + echo 'Waiting for postgres to be ready...'; + sleep 5; + echo 'Applying migrations to all databases...'; + for DB in storycove storycove_afterdark storycove_clas storycove_secret; do + echo \"Checking if database \$$DB exists...\"; + if PGPASSWORD=${DB_PASSWORD} psql -h postgres -U storycove -lqt | cut -d \\| -f 1 | grep -qw \$$DB; then + echo \"Applying migration to \$$DB...\"; + PGPASSWORD=${DB_PASSWORD} psql -h postgres -U storycove -d \$$DB -f /migrations/create_backup_jobs_table.sql; + echo \"✓ Migration applied to \$$DB\"; + else + echo \"⚠ Database \$$DB does not exist, skipping...\"; + fi; + done; + echo 'All migrations complete!'; + " + restart: "no" + solr: build: From 1ee9af8f280f3b1ec58ac2d108106e54bf844abb Mon Sep 17 00:00:00 2001 From: Stefan Hardegger Date: Mon, 20 Oct 2025 12:55:56 +0200 Subject: [PATCH 5/9] deployment fix? --- deploy.sh | 18 ++---------------- docker-compose.yml | 32 +++++++++++++++++++------------- 2 files changed, 21 insertions(+), 29 deletions(-) diff --git a/deploy.sh b/deploy.sh index 6af684b..bd5b2d8 100755 --- a/deploy.sh +++ b/deploy.sh @@ -57,22 +57,8 @@ fi # Apply database migrations echo -e "${YELLOW}🗄️ Applying database migrations...${NC}" -if [ -f "backend/create_backup_jobs_table.sql" ]; then - echo "Applying backup_jobs table migration..." 
- - # Get list of databases - DATABASES=$(docker-compose exec -T postgres psql -U storycove -lqt | cut -d \| -f 1 | grep -E '^ storycove' | sed 's/^[ \t]*//') - - # Apply migration to each database - for DB_NAME in $DATABASES; do - echo " - Applying to database: $DB_NAME" - docker-compose exec -T postgres psql -U storycove -d "$DB_NAME" < backend/create_backup_jobs_table.sql 2>&1 | grep -E "(CREATE|ERROR)" || true - done - - echo -e "${GREEN}✅ Database migrations applied${NC}" -else - echo -e "${YELLOW}⚠️ No migration files found, skipping...${NC}" -fi +docker-compose run --rm migrations +echo -e "${GREEN}✅ Database migrations applied${NC}" # Check if Solr is ready echo -e "${YELLOW}🔍 Checking Solr health...${NC}" diff --git a/docker-compose.yml b/docker-compose.yml index 72f079c..00c374e 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -65,34 +65,40 @@ services: - postgres_data:/var/lib/postgresql/data networks: - storycove-network + healthcheck: + test: ["CMD-SHELL", "pg_isready -U storycove -d storycove"] + interval: 5s + timeout: 5s + retries: 5 migrations: image: postgres:15-alpine depends_on: - - postgres + postgres: + condition: service_healthy volumes: - ./backend/create_backup_jobs_table.sql:/migrations/create_backup_jobs_table.sql:ro networks: - storycove-network + environment: + - PGPASSWORD=${DB_PASSWORD} entrypoint: /bin/sh command: > -c " - echo 'Waiting for postgres to be ready...'; - sleep 5; - echo 'Applying migrations to all databases...'; + echo '🗄️ Applying database migrations...'; for DB in storycove storycove_afterdark storycove_clas storycove_secret; do - echo \"Checking if database \$$DB exists...\"; - if PGPASSWORD=${DB_PASSWORD} psql -h postgres -U storycove -lqt | cut -d \\| -f 1 | grep -qw \$$DB; then - echo \"Applying migration to \$$DB...\"; - PGPASSWORD=${DB_PASSWORD} psql -h postgres -U storycove -d \$$DB -f /migrations/create_backup_jobs_table.sql; - echo \"✓ Migration applied to \$$DB\"; - else - echo \"⚠ Database \$$DB does not exist, skipping...\"; + if psql -h postgres -U storycove -lqt | cut -d '|' -f 1 | grep -qw \$$DB; then + echo \" ✓ Applying migration to \$$DB...\"; + psql -h postgres -U storycove -d \$$DB -f /migrations/create_backup_jobs_table.sql > /dev/null 2>&1 || true; fi; done; - echo 'All migrations complete!'; + echo '✅ Migrations complete!'; + sleep infinity; " - restart: "no" + deploy: + restart_policy: + condition: on-failure + max_attempts: 1 solr: From 32544d4f4aaf8ca4100029eaa5ad169208435c53 Mon Sep 17 00:00:00 2001 From: Stefan Hardegger Date: Mon, 20 Oct 2025 14:13:45 +0200 Subject: [PATCH 6/9] different approach to migration --- .../config/DatabaseMigrationRunner.java | 111 ++++++++++++++++++ docker-compose.yml | 31 +---- 2 files changed, 112 insertions(+), 30 deletions(-) create mode 100644 backend/src/main/java/com/storycove/config/DatabaseMigrationRunner.java diff --git a/backend/src/main/java/com/storycove/config/DatabaseMigrationRunner.java b/backend/src/main/java/com/storycove/config/DatabaseMigrationRunner.java new file mode 100644 index 0000000..9e2839e --- /dev/null +++ b/backend/src/main/java/com/storycove/config/DatabaseMigrationRunner.java @@ -0,0 +1,111 @@ +package com.storycove.config; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.boot.CommandLineRunner; +import org.springframework.core.annotation.Order; +import 
From 32544d4f4aaf8ca4100029eaa5ad169208435c53 Mon Sep 17 00:00:00 2001
From: Stefan Hardegger
Date: Mon, 20 Oct 2025 14:13:45 +0200
Subject: [PATCH 6/9] different approach to migration

---
 .../config/DatabaseMigrationRunner.java | 111 ++++++++++++++++++
 docker-compose.yml                      |  31 +----
 2 files changed, 112 insertions(+), 30 deletions(-)
 create mode 100644 backend/src/main/java/com/storycove/config/DatabaseMigrationRunner.java

diff --git a/backend/src/main/java/com/storycove/config/DatabaseMigrationRunner.java b/backend/src/main/java/com/storycove/config/DatabaseMigrationRunner.java
new file mode 100644
index 0000000..9e2839e
--- /dev/null
+++ b/backend/src/main/java/com/storycove/config/DatabaseMigrationRunner.java
@@ -0,0 +1,111 @@
+package com.storycove.config;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.boot.CommandLineRunner;
+import org.springframework.core.annotation.Order;
+import org.springframework.stereotype.Component;
+
+import javax.sql.DataSource;
+import java.sql.Connection;
+import java.sql.Statement;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Runs database migrations on application startup.
+ * This ensures all library databases have the required schema,
+ * particularly for tables like backup_jobs that were added after initial deployment.
+ */
+@Component
+@Order(1) // Run early in startup sequence
+public class DatabaseMigrationRunner implements CommandLineRunner {
+
+    private static final Logger logger = LoggerFactory.getLogger(DatabaseMigrationRunner.class);
+
+    @Autowired
+    private DataSource dataSource;
+
+    @Value("${spring.datasource.username}")
+    private String dbUsername;
+
+    @Value("${spring.datasource.password}")
+    private String dbPassword;
+
+    // List of all library databases that need migrations
+    private static final List<String> LIBRARY_DATABASES = Arrays.asList(
+            "storycove",            // default database
+            "storycove_afterdark",
+            "storycove_clas",
+            "storycove_secret"
+    );
+
+    // SQL for backup_jobs table migration (idempotent)
+    private static final String BACKUP_JOBS_MIGRATION = """
+            CREATE TABLE IF NOT EXISTS backup_jobs (
+                id UUID PRIMARY KEY,
+                library_id VARCHAR(255) NOT NULL,
+                type VARCHAR(50) NOT NULL CHECK (type IN ('DATABASE_ONLY', 'COMPLETE')),
+                status VARCHAR(50) NOT NULL CHECK (status IN ('PENDING', 'IN_PROGRESS', 'COMPLETED', 'FAILED', 'EXPIRED')),
+                file_path VARCHAR(1000),
+                file_size_bytes BIGINT,
+                progress_percent INTEGER,
+                error_message VARCHAR(1000),
+                created_at TIMESTAMP NOT NULL,
+                started_at TIMESTAMP,
+                completed_at TIMESTAMP,
+                expires_at TIMESTAMP
+            );
+
+            CREATE INDEX IF NOT EXISTS idx_backup_jobs_library_id ON backup_jobs(library_id);
+            CREATE INDEX IF NOT EXISTS idx_backup_jobs_status ON backup_jobs(status);
+            CREATE INDEX IF NOT EXISTS idx_backup_jobs_expires_at ON backup_jobs(expires_at);
+            CREATE INDEX IF NOT EXISTS idx_backup_jobs_created_at ON backup_jobs(created_at DESC);
+            """;
+
+    @Override
+    public void run(String... args) throws Exception {
+        logger.info("🗄️ Starting database migrations...");
+
+        for (String database : LIBRARY_DATABASES) {
+            try {
+                applyMigrations(database);
+                logger.info("✅ Successfully applied migrations to database: {}", database);
+            } catch (Exception e) {
+                // Log error but don't fail startup if database doesn't exist yet
+                if (e.getMessage() != null && e.getMessage().contains("does not exist")) {
+                    logger.warn("⚠️ Database {} does not exist yet, skipping migrations", database);
+                } else {
+                    logger.error("❌ Failed to apply migrations to database: {}", database, e);
+                    // Don't throw - allow application to start even if some migrations fail
+                }
+            }
+        }
+
+        logger.info("✅ Database migrations completed");
+    }
+
+    private void applyMigrations(String database) throws Exception {
+        // We need to connect directly to each database, not through SmartRoutingDataSource
+        // Build connection URL from the default datasource URL; borrow a pooled
+        // connection briefly and return it, so the pool is not drained on startup
+        String originalUrl;
+        try (Connection defaultConn = dataSource.getConnection()) {
+            originalUrl = defaultConn.getMetaData().getURL();
+        }
+        String baseUrl = originalUrl.substring(0, originalUrl.lastIndexOf('/'));
+        String targetUrl = baseUrl + "/" + database;
+
+        // Connect directly to target database using credentials from application properties
+        try (Connection conn = java.sql.DriverManager.getConnection(
+                targetUrl,
+                dbUsername,
+                dbPassword
+        )) {
+            // Apply backup_jobs migration
+            try (Statement stmt = conn.createStatement()) {
+                stmt.execute(BACKUP_JOBS_MIGRATION);
+            }
+
+            logger.debug("Applied backup_jobs migration to {}", database);
+        }
+    }
+}

diff --git a/docker-compose.yml b/docker-compose.yml
index 00c374e..24d5981 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -46,7 +46,7 @@ services:
       - library_config:/app/config
     depends_on:
       postgres:
-        condition: service_started
+        condition: service_healthy
       solr:
         condition: service_started
     networks:
@@ -71,35 +71,6 @@
       timeout: 5s
       retries: 5
 
-  migrations:
-    image: postgres:15-alpine
-    depends_on:
-      postgres:
-        condition: service_healthy
-    volumes:
-      - ./backend/create_backup_jobs_table.sql:/migrations/create_backup_jobs_table.sql:ro
-    networks:
-      - storycove-network
-    environment:
-      - PGPASSWORD=${DB_PASSWORD}
-    entrypoint: /bin/sh
-    command: >
-      -c "
-      echo '🗄️ Applying database migrations...';
-      for DB in storycove storycove_afterdark storycove_clas storycove_secret; do
-        if psql -h postgres -U storycove -lqt | cut -d '|' -f 1 | grep -qw \$$DB; then
-          echo \"  ✓ Applying migration to \$$DB...\";
-          psql -h postgres -U storycove -d \$$DB -f /migrations/create_backup_jobs_table.sql > /dev/null 2>&1 || true;
-        fi;
-      done;
-      echo '✅ Migrations complete!';
-      sleep infinity;
-      "
-    deploy:
-      restart_policy:
-        condition: on-failure
-        max_attempts: 1
-
   solr:
     build:
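DatabaseMigrationRunner hard-codes LIBRARY_DATABASES, so adding a library means a code change and a rebuild. Spring can bind a comma-separated property to a list, which would let deployments own the list instead; a sketch (the property name is hypothetical):

    // Hypothetical alternative to the hard-coded list, e.g. in application.yml:
    // storycove.migrations.databases=storycove,storycove_afterdark,storycove_clas,storycove_secret
    @Value("${storycove.migrations.databases:storycove}")
    private List<String> libraryDatabases;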
From 1c004eb7d619319401480f309c9d5d72ebce46e2 Mon Sep 17 00:00:00 2001
From: Stefan Hardegger
Date: Mon, 20 Oct 2025 14:25:12 +0200
Subject: [PATCH 7/9] fix backup async

---
 .../java/com/storycove/service/AsyncBackupService.java | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/backend/src/main/java/com/storycove/service/AsyncBackupService.java b/backend/src/main/java/com/storycove/service/AsyncBackupService.java
index 0570375..84d0baa 100644
--- a/backend/src/main/java/com/storycove/service/AsyncBackupService.java
+++ b/backend/src/main/java/com/storycove/service/AsyncBackupService.java
@@ -48,7 +48,10 @@ public class AsyncBackupService {
         BackupJob job = new BackupJob(libraryId, type);
         job = backupJobRepository.save(job);
 
-        // Start backup in background
+        // Force flush to ensure job is committed to DB before async execution
+        backupJobRepository.flush();
+
+        // Start backup in background (async method will run in separate thread after this transaction commits)
         executeBackupAsync(job.getId());
 
         return job;
@@ -58,7 +61,7 @@
      * Execute backup asynchronously
      */
     @Async
-    @Transactional
+    @Transactional(propagation = org.springframework.transaction.annotation.Propagation.REQUIRES_NEW)
     public void executeBackupAsync(UUID jobId) {
         Optional<BackupJob> jobOpt = backupJobRepository.findById(jobId);
         if (jobOpt.isEmpty()) {
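Patch 7 still calls executeBackupAsync() from within AsyncBackupService itself, which is why the backup does not actually run in the background: Spring implements @Async with a proxy, and a this-call never passes through that proxy. Patch 8 below fixes this by moving the method into its own bean. An illustrative sketch of the pitfall (class and method names hypothetical):

    @Service
    public class SelfCallPitfall {

        @Autowired
        private SelfCallPitfall self; // Spring injects the proxy, not the raw instance

        public void trigger(UUID jobId) {
            runAsync(jobId);      // plain this-call: bypasses the proxy, runs synchronously
            self.runAsync(jobId); // call through the proxy: actually dispatched asynchronously
        }

        @Async
        public void runAsync(UUID jobId) {
            // ... long-running work ...
        }
    }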
+ */ + @Async + @Transactional(propagation = Propagation.REQUIRES_NEW) + public void executeBackupAsync(UUID jobId) { + logger.info("Async executor starting for job {}", jobId); + + Optional jobOpt = backupJobRepository.findById(jobId); + if (jobOpt.isEmpty()) { + logger.error("Backup job not found: {}", jobId); + return; + } + + BackupJob job = jobOpt.get(); + job.setStatus(BackupJob.BackupStatus.IN_PROGRESS); + job.setStartedAt(LocalDateTime.now()); + job.setProgressPercent(0); + backupJobRepository.save(job); + + try { + logger.info("Starting backup job {} for library {}", job.getId(), job.getLibraryId()); + + // Switch to the correct library + if (!job.getLibraryId().equals(libraryService.getCurrentLibraryId())) { + libraryService.switchToLibraryAfterAuthentication(job.getLibraryId()); + } + + // Create backup file + Path backupDir = Paths.get(uploadDir, "backups", job.getLibraryId()); + Files.createDirectories(backupDir); + + String filename = String.format("backup_%s_%s.%s", + job.getId().toString(), + LocalDateTime.now().toString().replaceAll(":", "-"), + job.getType() == BackupJob.BackupType.COMPLETE ? "zip" : "sql"); + + Path backupFile = backupDir.resolve(filename); + + job.setProgressPercent(10); + backupJobRepository.save(job); + + // Create the backup + Resource backupResource; + if (job.getType() == BackupJob.BackupType.COMPLETE) { + backupResource = databaseManagementService.createCompleteBackup(); + } else { + backupResource = databaseManagementService.createBackup(); + } + + job.setProgressPercent(80); + backupJobRepository.save(job); + + // Copy resource to permanent file + try (var inputStream = backupResource.getInputStream(); + var outputStream = Files.newOutputStream(backupFile)) { + inputStream.transferTo(outputStream); + } + + job.setProgressPercent(95); + backupJobRepository.save(job); + + // Set file info + job.setFilePath(backupFile.toString()); + job.setFileSizeBytes(Files.size(backupFile)); + job.setStatus(BackupJob.BackupStatus.COMPLETED); + job.setCompletedAt(LocalDateTime.now()); + job.setProgressPercent(100); + + logger.info("Backup job {} completed successfully. 
File size: {} bytes", + job.getId(), job.getFileSizeBytes()); + + } catch (Exception e) { + logger.error("Backup job {} failed", job.getId(), e); + job.setStatus(BackupJob.BackupStatus.FAILED); + job.setErrorMessage(e.getMessage()); + job.setCompletedAt(LocalDateTime.now()); + } finally { + backupJobRepository.save(job); + } + } +} diff --git a/backend/src/main/java/com/storycove/service/AsyncBackupService.java b/backend/src/main/java/com/storycove/service/AsyncBackupService.java index 84d0baa..19cbae7 100644 --- a/backend/src/main/java/com/storycove/service/AsyncBackupService.java +++ b/backend/src/main/java/com/storycove/service/AsyncBackupService.java @@ -8,7 +8,6 @@ import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Value; import org.springframework.core.io.FileSystemResource; import org.springframework.core.io.Resource; -import org.springframework.scheduling.annotation.Async; import org.springframework.scheduling.annotation.Scheduled; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; @@ -17,7 +16,6 @@ import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; -import java.sql.SQLException; import java.time.LocalDateTime; import java.util.List; import java.util.Optional; @@ -35,108 +33,29 @@ public class AsyncBackupService { private BackupJobRepository backupJobRepository; @Autowired - private DatabaseManagementService databaseManagementService; - - @Autowired - private LibraryService libraryService; + private AsyncBackupExecutor asyncBackupExecutor; /** - * Start a backup job asynchronously + * Start a backup job asynchronously. + * This method returns immediately after creating the job record. */ @Transactional public BackupJob startBackupJob(String libraryId, BackupJob.BackupType type) { + logger.info("Creating backup job for library: {}, type: {}", libraryId, type); + BackupJob job = new BackupJob(libraryId, type); job = backupJobRepository.save(job); - // Force flush to ensure job is committed to DB before async execution - backupJobRepository.flush(); + logger.info("Backup job created with ID: {}. 
Starting async execution...", job.getId()); - // Start backup in background (async method will run in separate thread after this transaction commits) - executeBackupAsync(job.getId()); + // Start backup in background using separate service (ensures @Async works properly) + asyncBackupExecutor.executeBackupAsync(job.getId()); + + logger.info("Async backup execution triggered for job: {}", job.getId()); return job; } - /** - * Execute backup asynchronously - */ - @Async - @Transactional(propagation = org.springframework.transaction.annotation.Propagation.REQUIRES_NEW) - public void executeBackupAsync(UUID jobId) { - Optional jobOpt = backupJobRepository.findById(jobId); - if (jobOpt.isEmpty()) { - logger.error("Backup job not found: {}", jobId); - return; - } - - BackupJob job = jobOpt.get(); - job.setStatus(BackupJob.BackupStatus.IN_PROGRESS); - job.setStartedAt(LocalDateTime.now()); - job.setProgressPercent(0); - backupJobRepository.save(job); - - try { - logger.info("Starting backup job {} for library {}", job.getId(), job.getLibraryId()); - - // Switch to the correct library - if (!job.getLibraryId().equals(libraryService.getCurrentLibraryId())) { - libraryService.switchToLibraryAfterAuthentication(job.getLibraryId()); - } - - // Create backup file - Path backupDir = Paths.get(uploadDir, "backups", job.getLibraryId()); - Files.createDirectories(backupDir); - - String filename = String.format("backup_%s_%s.%s", - job.getId().toString(), - LocalDateTime.now().toString().replaceAll(":", "-"), - job.getType() == BackupJob.BackupType.COMPLETE ? "zip" : "sql"); - - Path backupFile = backupDir.resolve(filename); - - job.setProgressPercent(10); - backupJobRepository.save(job); - - // Create the backup - Resource backupResource; - if (job.getType() == BackupJob.BackupType.COMPLETE) { - backupResource = databaseManagementService.createCompleteBackup(); - } else { - backupResource = databaseManagementService.createBackup(); - } - - job.setProgressPercent(80); - backupJobRepository.save(job); - - // Copy resource to permanent file - try (var inputStream = backupResource.getInputStream(); - var outputStream = Files.newOutputStream(backupFile)) { - inputStream.transferTo(outputStream); - } - - job.setProgressPercent(95); - backupJobRepository.save(job); - - // Set file info - job.setFilePath(backupFile.toString()); - job.setFileSizeBytes(Files.size(backupFile)); - job.setStatus(BackupJob.BackupStatus.COMPLETED); - job.setCompletedAt(LocalDateTime.now()); - job.setProgressPercent(100); - - logger.info("Backup job {} completed successfully. 
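None of these patches shows where async and scheduling support are switched on; @Async and @Scheduled are inert without @EnableAsync and @EnableScheduling. StoryCove presumably enables them in an existing configuration class; a minimal sketch of such a configuration, with a bounded pool so concurrent backups cannot pile up (the class name and pool sizes are assumptions):

    import java.util.concurrent.Executor;
    import org.springframework.context.annotation.Bean;
    import org.springframework.context.annotation.Configuration;
    import org.springframework.scheduling.annotation.EnableAsync;
    import org.springframework.scheduling.annotation.EnableScheduling;
    import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;

    @Configuration
    @EnableAsync
    @EnableScheduling
    public class AsyncConfig {

        // A small, bounded pool: backups are heavy, so one or two at a time is enough.
        @Bean(name = "taskExecutor")
        public Executor taskExecutor() {
            ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor();
            executor.setCorePoolSize(1);
            executor.setMaxPoolSize(2);
            executor.setThreadNamePrefix("backup-");
            executor.initialize();
            return executor;
        }
    }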
File size: {} bytes", - job.getId(), job.getFileSizeBytes()); - - } catch (Exception e) { - logger.error("Backup job {} failed", job.getId(), e); - job.setStatus(BackupJob.BackupStatus.FAILED); - job.setErrorMessage(e.getMessage()); - job.setCompletedAt(LocalDateTime.now()); - } finally { - backupJobRepository.save(job); - } - } - /** * Get backup job status */ From ff49589f327206de5e108daeb30fc1a73795e234 Mon Sep 17 00:00:00 2001 From: Stefan Hardegger Date: Mon, 20 Oct 2025 14:51:27 +0200 Subject: [PATCH 9/9] Automatic backup --- .../storycove/repository/StoryRepository.java | 3 + .../service/AutomaticBackupService.java | 262 ++++++++++++++++++ backend/src/main/resources/application.yml | 2 + docker-compose.yml | 2 + 4 files changed, 269 insertions(+) create mode 100644 backend/src/main/java/com/storycove/service/AutomaticBackupService.java diff --git a/backend/src/main/java/com/storycove/repository/StoryRepository.java b/backend/src/main/java/com/storycove/repository/StoryRepository.java index 2565bc6..2e532f6 100644 --- a/backend/src/main/java/com/storycove/repository/StoryRepository.java +++ b/backend/src/main/java/com/storycove/repository/StoryRepository.java @@ -86,6 +86,9 @@ public interface StoryRepository extends JpaRepository { @Query("SELECT COUNT(s) FROM Story s WHERE s.createdAt >= :since") long countStoriesCreatedSince(@Param("since") LocalDateTime since); + + @Query("SELECT COUNT(s) FROM Story s WHERE s.createdAt >= :since OR s.updatedAt >= :since") + long countStoriesModifiedAfter(@Param("since") LocalDateTime since); @Query("SELECT AVG(s.wordCount) FROM Story s") Double findAverageWordCount(); diff --git a/backend/src/main/java/com/storycove/service/AutomaticBackupService.java b/backend/src/main/java/com/storycove/service/AutomaticBackupService.java new file mode 100644 index 0000000..cbf6f7d --- /dev/null +++ b/backend/src/main/java/com/storycove/service/AutomaticBackupService.java @@ -0,0 +1,262 @@ +package com.storycove.service; + +import com.storycove.repository.StoryRepository; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.core.io.Resource; +import org.springframework.scheduling.annotation.Scheduled; +import org.springframework.stereotype.Service; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.time.LocalDateTime; +import java.time.format.DateTimeFormatter; +import java.util.Comparator; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * Service for automatic daily backups. + * Runs at 4 AM daily and creates a backup if content has changed since last backup. + * Keeps maximum of 5 backups, rotating old ones out. 
diff --git a/backend/src/main/java/com/storycove/service/AutomaticBackupService.java b/backend/src/main/java/com/storycove/service/AutomaticBackupService.java
new file mode 100644
index 0000000..cbf6f7d
--- /dev/null
+++ b/backend/src/main/java/com/storycove/service/AutomaticBackupService.java
@@ -0,0 +1,262 @@
+package com.storycove.service;
+
+import com.storycove.repository.StoryRepository;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.core.io.Resource;
+import org.springframework.scheduling.annotation.Scheduled;
+import org.springframework.stereotype.Service;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.time.LocalDateTime;
+import java.time.format.DateTimeFormatter;
+import java.util.Comparator;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+/**
+ * Service for automatic daily backups.
+ * Runs at 4 AM daily and creates a backup if content has changed since the last backup.
+ * Keeps a maximum of 5 backups, rotating old ones out.
+ */
+@Service
+public class AutomaticBackupService {
+
+    private static final Logger logger = LoggerFactory.getLogger(AutomaticBackupService.class);
+    private static final int MAX_BACKUPS = 5;
+    private static final DateTimeFormatter FILENAME_FORMATTER = DateTimeFormatter.ofPattern("yyyy-MM-dd_HH-mm-ss");
+
+    @Value("${storycove.automatic-backup.dir:/app/automatic-backups}")
+    private String automaticBackupDir;
+
+    @Autowired
+    private StoryRepository storyRepository;
+
+    @Autowired
+    private DatabaseManagementService databaseManagementService;
+
+    @Autowired
+    private LibraryService libraryService;
+
+    private LocalDateTime lastBackupCheck = null;
+
+    /**
+     * Scheduled job that runs daily at 4 AM.
+     * Creates a backup if content has changed since the last backup.
+     */
+    @Scheduled(cron = "0 0 4 * * ?")
+    public void performAutomaticBackup() {
+        logger.info("========================================");
+        logger.info("Starting automatic backup check at 4 AM");
+        logger.info("========================================");
+
+        try {
+            // Get current library ID (or default)
+            String libraryId = libraryService.getCurrentLibraryId();
+            if (libraryId == null) {
+                libraryId = "default";
+            }
+
+            logger.info("Checking for content changes in library: {}", libraryId);
+
+            // Check if content has changed since the last backup
+            if (!hasContentChanged()) {
+                logger.info("No content changes detected since last backup. Skipping backup.");
+                logger.info("========================================");
+                return;
+            }
+
+            logger.info("Content changes detected! Creating automatic backup...");
+
+            // Create backup directory for this library
+            Path backupPath = Paths.get(automaticBackupDir, libraryId);
+            Files.createDirectories(backupPath);
+
+            // Create the backup
+            String timestamp = LocalDateTime.now().format(FILENAME_FORMATTER);
+            String filename = String.format("auto_backup_%s.zip", timestamp);
+            Path backupFile = backupPath.resolve(filename);
+
+            logger.info("Creating complete backup to: {}", backupFile);
+
+            Resource backup = databaseManagementService.createCompleteBackup();
+
+            // Write backup to file
+            try (var inputStream = backup.getInputStream();
+                 var outputStream = Files.newOutputStream(backupFile)) {
+                inputStream.transferTo(outputStream);
+            }
+
+            long fileSize = Files.size(backupFile);
+            logger.info("✅ Automatic backup created successfully");
+            logger.info("   File: {}", backupFile.getFileName());
+            logger.info("   Size: {} MB", fileSize / 1024 / 1024);
+
+            // Rotate old backups (keep only MAX_BACKUPS)
+            rotateBackups(backupPath);
+
+            // Update last backup check time
+            lastBackupCheck = LocalDateTime.now();
+
+            logger.info("========================================");
+            logger.info("Automatic backup completed successfully");
+            logger.info("========================================");
+
+        } catch (Exception e) {
+            logger.error("❌ Automatic backup failed", e);
+            logger.info("========================================");
+        }
+    }
+
+    /**
+     * Check if content has changed since the last backup.
+     * Looks for stories created or updated after the last backup time.
+     */
+    private boolean hasContentChanged() {
+        try {
+            if (lastBackupCheck == null) {
+                // First run - check if there are any stories at all
+                long storyCount = storyRepository.count();
+                logger.info("First backup check - found {} stories", storyCount);
+                return storyCount > 0;
+            }
+
+            // Check for stories created or updated since the last backup
+            long changedCount = storyRepository.countStoriesModifiedAfter(lastBackupCheck);
+            logger.info("Found {} stories modified since last backup ({})", changedCount, lastBackupCheck);
+            return changedCount > 0;
+
+        } catch (Exception e) {
+            logger.error("Error checking for content changes", e);
+            // On error, create a backup to be safe
+            return true;
+        }
+    }
+
+    /**
+     * Rotate backups - keep only the MAX_BACKUPS most recent backups.
+     * Deletes older backups.
+     */
+    private void rotateBackups(Path backupPath) throws IOException {
+        logger.info("Checking for old backups to rotate...");
+
+        // Find all backup files in the directory
+        List<Path> backupFiles;
+        try (Stream<Path> stream = Files.list(backupPath)) {
+            backupFiles = stream
+                    .filter(Files::isRegularFile)
+                    .filter(p -> p.getFileName().toString().startsWith("auto_backup_"))
+                    .filter(p -> p.getFileName().toString().endsWith(".zip"))
+                    .sorted(Comparator.comparing((Path p) -> {
+                        try {
+                            return Files.getLastModifiedTime(p);
+                        } catch (IOException e) {
+                            return java.nio.file.attribute.FileTime.fromMillis(0); // epoch fallback; null would make the comparator throw
+                        }
+                    }).reversed()) // Most recent first
+                    .collect(Collectors.toList());
+        }
+
+        logger.info("Found {} automatic backups", backupFiles.size());
+
+        // Delete old backups if we exceed MAX_BACKUPS
+        if (backupFiles.size() > MAX_BACKUPS) {
+            List<Path> toDelete = backupFiles.subList(MAX_BACKUPS, backupFiles.size());
+            logger.info("Deleting {} old backups to maintain maximum of {}", toDelete.size(), MAX_BACKUPS);
+
+            for (Path oldBackup : toDelete) {
+                try {
+                    Files.delete(oldBackup);
+                    logger.info("   Deleted old backup: {}", oldBackup.getFileName());
+                } catch (IOException e) {
+                    logger.warn("Failed to delete old backup: {}", oldBackup, e);
+                }
+            }
+        } else {
+            logger.info("Backup count within limit ({}), no rotation needed", MAX_BACKUPS);
+        }
+    }
+
+    /**
+     * Manual trigger for testing - creates a backup immediately if content has changed.
+     */
+    public void triggerManualBackup() {
+        logger.info("Manual automatic backup triggered");
+        performAutomaticBackup();
+    }
+
+    /**
+     * Get the list of automatic backups for the current library.
+     */
+    public List<BackupInfo> listAutomaticBackups() throws IOException {
+        String libraryId = libraryService.getCurrentLibraryId();
+        if (libraryId == null) {
+            libraryId = "default";
+        }
+
+        Path backupPath = Paths.get(automaticBackupDir, libraryId);
+        if (!Files.exists(backupPath)) {
+            return List.of();
+        }
+
+        try (Stream<Path> stream = Files.list(backupPath)) {
+            return stream
+                    .filter(Files::isRegularFile)
+                    .filter(p -> p.getFileName().toString().startsWith("auto_backup_"))
+                    .filter(p -> p.getFileName().toString().endsWith(".zip"))
+                    .sorted(Comparator.comparing((Path p) -> {
+                        try {
+                            return Files.getLastModifiedTime(p);
+                        } catch (IOException e) {
+                            return java.nio.file.attribute.FileTime.fromMillis(0); // epoch fallback; null would make the comparator throw
+                        }
+                    }).reversed())
+                    .map(p -> {
+                        try {
+                            return new BackupInfo(
+                                    p.getFileName().toString(),
+                                    Files.size(p),
+                                    Files.getLastModifiedTime(p).toInstant().toString()
+                            );
+                        } catch (IOException e) {
+                            return null;
+                        }
+                    })
+                    .filter(info -> info != null)
+                    .collect(Collectors.toList());
+        }
+    }
+
+    /**
+     * Simple backup info class.
+     */
+    public static class BackupInfo {
+        private final String filename;
+        private final long sizeBytes;
+        private final String createdAt;
+
+        public BackupInfo(String filename, long sizeBytes, String createdAt) {
+            this.filename = filename;
+            this.sizeBytes = sizeBytes;
+            this.createdAt = createdAt;
+        }
+
+        public String getFilename() {
+            return filename;
+        }
+
+        public long getSizeBytes() {
+            return sizeBytes;
+        }
+
+        public String getCreatedAt() {
+            return createdAt;
+        }
+    }
+}
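The cron expression "0 0 4 * * ?" reads second, minute, hour, day-of-month, month, day-of-week, so the job fires at 04:00:00 every day; Spring's CronExpression accepts the Quartz-style "?" for the unused day field. Note that @Scheduled only fires when scheduling is enabled somewhere in the application. The patch does not show the application class, so as a hedged sketch with a hypothetical class name:

    import org.springframework.boot.autoconfigure.SpringBootApplication;
    import org.springframework.scheduling.annotation.EnableAsync;
    import org.springframework.scheduling.annotation.EnableScheduling;

    // Sketch only: the actual main class is not part of this patch series.
    @SpringBootApplication
    @EnableScheduling   // required for @Scheduled jobs such as performAutomaticBackup()
    @EnableAsync        // required for @Async beans such as AsyncBackupExecutor
    public class StorycoveApplication {
    }

Whether StoryCove already declares these annotations is not visible here; without @EnableScheduling the 4 AM job would silently never run.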
diff --git a/backend/src/main/resources/application.yml b/backend/src/main/resources/application.yml
index 78a5cc7..0d4205b 100644
--- a/backend/src/main/resources/application.yml
+++ b/backend/src/main/resources/application.yml
@@ -89,6 +89,8 @@ storycove:
       enable-metrics: ${SOLR_ENABLE_METRICS:true}
   images:
     storage-path: ${IMAGE_STORAGE_PATH:/app/images}
+  automatic-backup:
+    dir: ${AUTOMATIC_BACKUP_DIR:/app/automatic-backups}
 
 management:
   endpoints:
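Both the @Value default in AutomaticBackupService and this YAML entry resolve to /app/automatic-backups, so the property mainly documents the knob; deployments override it by setting the AUTOMATIC_BACKUP_DIR environment variable, which Spring substitutes through the ${NAME:default} placeholder at startup.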
diff --git a/docker-compose.yml b/docker-compose.yml
index 24d5981..e664c42 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -44,6 +44,7 @@ services:
     volumes:
       - images_data:/app/images
      - library_config:/app/config
+      - automatic_backups:/app/automatic-backups
     depends_on:
       postgres:
         condition: service_healthy
@@ -106,6 +107,7 @@ volumes:
   solr_data:
   images_data:
   library_config:
+  automatic_backups:
 
 configs:
   nginx_config:
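The named volume keeps the archives on the same Docker host as the database they protect, so copying them off-host on some schedule remains advisable. Since each automatic backup is a complete ZIP produced by createCompleteBackup() (database dump, image files, metadata), a copied archive can be sanity-checked cheaply; a sketch, with the caveat that the entry names inside the ZIP depend on how addDatabaseDumpToZip and addMetadataToZip name them:

    import java.io.IOException;
    import java.nio.file.Path;
    import java.util.zip.ZipEntry;
    import java.util.zip.ZipFile;

    // Sketch: fail fast if an automatic backup is truncated or not a valid ZIP.
    static void verifyBackupArchive(Path archive) throws IOException {
        try (ZipFile zip = new ZipFile(archive.toFile())) { // throws ZipException if corrupt
            if (zip.size() == 0) {
                throw new IOException("Backup archive is empty: " + archive);
            }
            // Listing entries forces the central directory to be read; adjust
            // any per-entry checks to the project's actual archive layout.
            zip.stream()
               .map(ZipEntry::getName)
               .forEach(name -> System.out.println("  contains: " + name));
        }
    }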