#!/bin/bash
###############################################################################
# Supabase Backup Script
#
# Strategy: Minimal local storage + Full backup to Synology NAS
# - Local: Only latest daily backup + logs (saves disk space)
# - NAS: Full rotation (7 daily / 4 weekly / 3 monthly)
# - Authentication: sshpass with password file
# - Transfer: rsync over SSH (reliable, resumable)
# - Verification: SHA256 checksums, tar integrity check
#
# Author: ravolar
# Server: @rag, Ubuntu, 192.168.10.33
# NAS: Synology DS, 192.168.10.163:2022
###############################################################################

set -euo pipefail
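
# Scheduling sketch (assumption, not part of the original deployment): run
# nightly from the ravolar crontab; the script path below is hypothetical.
#   30 2 * * * /home/ravolar/bin/supabase-backup.sh >/dev/null 2>&1
# Output still lands in the log directory because log() tees every message there.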

# ===========================
# CONFIGURATION
# ===========================

# Supabase installation directory
SUPABASE_DIR="/home/ravolar/supabase"

# Local backup storage paths
BACKUP_LOCAL="/opt/backups/supabase"
BACKUP_LOCAL_DAILY="${BACKUP_LOCAL}/daily"
BACKUP_LOGS="${BACKUP_LOCAL}/logs"

# Synology NAS connection settings
SYNOLOGY_USER="ravolar"
SYNOLOGY_HOST="192.168.10.163"
SYNOLOGY_PORT="2022"
SYNOLOGY_PASSWORD_FILE="${HOME}/.synology_password"
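
# One-time setup sketch (assumption: done interactively before the first run):
#   printf '%s\n' 'your-nas-password' > ~/.synology_password
#   chmod 600 ~/.synology_password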

# NAS backup paths
SYNOLOGY_PATH="/volume1/backup/rag/supabase"
SYNOLOGY_DAILY="${SYNOLOGY_PATH}/daily"
SYNOLOGY_WEEKLY="${SYNOLOGY_PATH}/weekly"
SYNOLOGY_MONTHLY="${SYNOLOGY_PATH}/monthly"

# Retention policy for NAS backups
DAILY_KEEP=7      # Keep last 7 daily backups
WEEKLY_KEEP=4     # Keep last 4 weekly backups (Mondays)
MONTHLY_KEEP=3    # Keep last 3 monthly backups (1st of month)

# Upload retry configuration
MAX_RETRIES=3
RETRY_DELAY=5

# Minimum free space requirements (in MB)
MIN_FREE_SPACE_TMP=1000     # 1 GB for temporary files
MIN_FREE_SPACE_LOCAL=500    # 500 MB for local backup storage
MIN_FREE_SPACE_NAS=2000     # 2 GB for NAS storage

# Backup file naming
TIMESTAMP=$(date +%Y-%m-%d_%H-%M-%S)
DAY_OF_WEEK=$(date +%u)     # 1=Monday, 7=Sunday
DAY_OF_MONTH=$(date +%d)    # 01-31
BACKUP_NAME="supabase_${TIMESTAMP}.tar.gz"
LOG_FILE="${BACKUP_LOGS}/backup_${TIMESTAMP}.log"
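
# Worked example of the naming scheme: a run started on 2025-01-06 at 02:30:00
# (illustrative date) produces:
#   archive:  supabase_2025-01-06_02-30-00.tar.gz
#   log file: /opt/backups/supabase/logs/backup_2025-01-06_02-30-00.log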

# ===========================
# UTILITY FUNCTIONS
# ===========================

# Log message to both console and log file
# Ensures log directory exists before writing
# Args: $* - message to log
log() {
    mkdir -p "$(dirname "${LOG_FILE}")" 2>/dev/null || true
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $*" | tee -a "${LOG_FILE}"
}

# Log error and exit with status 1
# Args: $1 - error message
error_exit() {
    log "ERROR: $1"
    exit 1
}

# ===========================
# PRE-FLIGHT CHECKS
# ===========================

# Check that all required tools and files are available
# Verifies: sshpass, rsync, nc (optional), password file
check_prerequisites() {
    log "Checking prerequisites..."

    # Check for sshpass (required for password-based SSH/rsync)
    if ! command -v sshpass &> /dev/null; then
        error_exit "sshpass is not installed. Install with: sudo apt install sshpass"
    fi

    # Check for rsync (required for file transfers)
    if ! command -v rsync &> /dev/null; then
        error_exit "rsync is not installed. Install with: sudo apt install rsync"
    fi

    # Check for netcat (optional, used for the connectivity pre-check)
    if ! command -v nc &> /dev/null; then
        log "WARNING: netcat not installed, skipping connectivity pre-check"
    fi

    # Verify the password file exists
    if [ ! -f "${SYNOLOGY_PASSWORD_FILE}" ]; then
        error_exit "Password file not found: ${SYNOLOGY_PASSWORD_FILE}"
    fi

    # Check and fix password file permissions (must be 600 for security)
    local perms
    perms=$(stat -c %a "${SYNOLOGY_PASSWORD_FILE}" 2>/dev/null || stat -f %A "${SYNOLOGY_PASSWORD_FILE}" 2>/dev/null)
    if [ "${perms}" != "600" ]; then
        log "WARNING: Password file has insecure permissions. Fixing..."
        chmod 600 "${SYNOLOGY_PASSWORD_FILE}"
    fi

    log "Prerequisites check passed"
}

# Check available disk space on all storage locations
# Validates: /tmp, local backup dir, NAS
# Note: the NAS check uses ssh_nas, so connectivity must be verified first
check_disk_space() {
    log "Checking disk space..."

    # Ensure backup directory exists before checking (df needs target to exist)
    mkdir -p "${BACKUP_LOCAL}" 2>/dev/null || true

    # Check /tmp (used for temporary backup assembly)
    local tmp_free
    tmp_free=$(df -m /tmp | awk 'NR==2 {print $4}')
    log "  /tmp: ${tmp_free}MB free"
    if [ "${tmp_free}" -lt "${MIN_FREE_SPACE_TMP}" ]; then
        error_exit "Insufficient space in /tmp (need ${MIN_FREE_SPACE_TMP}MB, have ${tmp_free}MB)"
    fi

    # Check local backup directory
    local local_free
    local_free=$(df -m "${BACKUP_LOCAL}" | awk 'NR==2 {print $4}')
    log "  Local backup: ${local_free}MB free"
    if [ "${local_free}" -lt "${MIN_FREE_SPACE_LOCAL}" ]; then
        error_exit "Insufficient space in ${BACKUP_LOCAL} (need ${MIN_FREE_SPACE_LOCAL}MB, have ${local_free}MB)"
    fi

    # Check NAS storage (check parent path to avoid issues if backup dir doesn't exist yet)
    local nas_parent
    nas_parent=$(dirname "${SYNOLOGY_PATH}")
    local nas_free
    nas_free=$(ssh_nas "df -m ${nas_parent} | awk 'NR==2 {print \$4}'")
    log "  NAS: ${nas_free}MB free"
    if [ "${nas_free}" -lt "${MIN_FREE_SPACE_NAS}" ]; then
        error_exit "Insufficient space on NAS (need ${MIN_FREE_SPACE_NAS}MB, have ${nas_free}MB)"
    fi

    log "Disk space check passed"
}

# Test connectivity to Synology NAS
# Tests: SSH port open, SSH authentication
check_nas_connectivity() {
    log "Checking NAS connectivity..."

    # Test if SSH port is open (using netcat if available)
    if command -v nc &> /dev/null; then
        if nc -z -w5 "${SYNOLOGY_HOST}" "${SYNOLOGY_PORT}" 2>/dev/null; then
            log "  Port ${SYNOLOGY_PORT} is open"
        else
            error_exit "Cannot connect to ${SYNOLOGY_HOST}:${SYNOLOGY_PORT}"
        fi
    fi

    # Test SSH authentication
    if ssh_nas "echo 'SSH connection OK'" >/dev/null 2>&1; then
        log "  SSH connection successful"
    else
        error_exit "SSH connection to NAS failed"
    fi

    log "NAS connectivity check passed"
}

# ===========================
# NETWORK OPERATIONS
# ===========================

# Execute SSH command on Synology NAS
# Args: $@ - command to execute
# Returns: command output and exit code
ssh_nas() {
    sshpass -f "${SYNOLOGY_PASSWORD_FILE}" \
        ssh -p "${SYNOLOGY_PORT}" \
            -o StrictHostKeyChecking=no \
            -o ConnectTimeout=10 \
            "${SYNOLOGY_USER}@${SYNOLOGY_HOST}" "$@"
}
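
# Security note: sshpass with StrictHostKeyChecking=no trades strictness for
# convenience. If the NAS permits key-based auth, a drop-in body for ssh_nas
# (assumption: a key installed beforehand, e.g. via ssh-copy-id) would be:
#   ssh -i "${HOME}/.ssh/id_ed25519" -p "${SYNOLOGY_PORT}" \
#       -o ConnectTimeout=10 "${SYNOLOGY_USER}@${SYNOLOGY_HOST}" "$@"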

# Upload file to NAS using rsync with retry logic
# Args: $1 - source file path
#       $2 - destination path on NAS
# Returns: 0 on success, 1 on failure after all retries
upload_file_to_nas() {
    local source_file="${1}"
    local dest_path="${2}"
    local attempt=1
    local max_attempts="${MAX_RETRIES}"

    # Retry loop
    while [ "${attempt}" -le "${max_attempts}" ]; do
        log "  Upload attempt ${attempt}/${max_attempts}..."

        # Attempt rsync transfer
        if sshpass -f "${SYNOLOGY_PASSWORD_FILE}" \
            rsync -avz --timeout=300 \
                -e "ssh -p ${SYNOLOGY_PORT} -o StrictHostKeyChecking=no -o ConnectTimeout=30" \
                "${source_file}" "${SYNOLOGY_USER}@${SYNOLOGY_HOST}:${dest_path}/"; then
            return 0
        else
            # If not last attempt, wait and retry
            if [ "${attempt}" -lt "${max_attempts}" ]; then
                log "  Upload failed, retrying in ${RETRY_DELAY} seconds..."
                sleep "${RETRY_DELAY}"
            fi
        fi

        attempt=$((attempt + 1))
    done

    return 1
}
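
# Usage sketch (illustrative): push an ad-hoc file into the daily folder.
#   upload_file_to_nas "/tmp/test.txt" "${SYNOLOGY_DAILY}"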

# ===========================
# BACKUP PREPARATION
# ===========================

# Create necessary backup directories
# Creates: local daily/logs dirs, NAS daily/weekly/monthly dirs
create_dirs() {
    log "Creating local directories..."
    mkdir -p "${BACKUP_LOCAL_DAILY}" "${BACKUP_LOGS}"

    log "Creating NAS directories..."
    ssh_nas "mkdir -p ${SYNOLOGY_DAILY} ${SYNOLOGY_WEEKLY} ${SYNOLOGY_MONTHLY}" || {
        error_exit "Failed to create directories on NAS. Check connection and credentials."
    }
}

# ===========================
# BACKUP OPERATIONS
# ===========================

# Backup all PostgreSQL databases
# Creates: compressed SQL dumps of all databases and global objects
# Args: $1 - temporary directory for backup assembly
backup_postgres() {
    log "Starting PostgreSQL backup..."

    local temp_dir="${1}"
    local db_backup_dir="${temp_dir}/databases"
    mkdir -p "${db_backup_dir}"

    # Find PostgreSQL container (|| true keeps set -e from aborting before the
    # friendlier error below if docker itself is unavailable)
    local pg_container
    pg_container=$(docker ps --filter "name=supabase.*db" --format "{{.Names}}" | head -n1 || true)

    if [ -z "${pg_container}" ]; then
        error_exit "PostgreSQL container not found"
    fi

    log "Found PostgreSQL container: ${pg_container}"

    # Backup main postgres database (contains auth and storage schemas)
    log "Backing up database: postgres"
    docker exec "${pg_container}" pg_dump -U postgres -d postgres | \
        gzip > "${db_backup_dir}/postgres.sql.gz" || {
        error_exit "Failed to backup postgres database"
    }

    # Backup all user-created databases
    log "Checking for user databases..."
    docker exec "${pg_container}" psql -U postgres -t \
        -c "SELECT datname FROM pg_database WHERE datistemplate = false AND datname NOT IN ('postgres');" | \
    while read -r db; do
        db=$(echo "$db" | xargs)  # trim whitespace
        if [ -n "$db" ]; then
            log "Backing up user database: ${db}"
            docker exec "${pg_container}" pg_dump -U postgres -d "${db}" | \
                gzip > "${db_backup_dir}/${db}.sql.gz" || \
                log "WARNING: Failed to backup database: ${db}"
        fi
    done

    # Backup global objects (roles, tablespaces, privileges)
    log "Backing up global objects..."
    docker exec "${pg_container}" pg_dumpall -U postgres -g | \
        gzip > "${db_backup_dir}/globals.sql.gz"

    log "PostgreSQL backup completed"
}
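
# Restore sketch (assumptions: same container name, extracted archive as the
# working directory; rehearse on a scratch instance before touching production):
#   gunzip -c databases/globals.sql.gz  | docker exec -i "${pg_container}" psql -U postgres
#   gunzip -c databases/postgres.sql.gz | docker exec -i "${pg_container}" psql -U postgres -d postgres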

# Backup Supabase Storage files (buckets and uploaded files)
# Copies from: volumes/storage directory
# Args: $1 - temporary directory for backup assembly
backup_storage() {
    log "Starting Storage backup..."

    local temp_dir="${1}"
    local storage_backup_dir="${temp_dir}/storage"
    mkdir -p "${storage_backup_dir}"

    local local_storage_path="${SUPABASE_DIR}/volumes/storage"

    if [ -d "${local_storage_path}" ]; then
        log "Found local storage path: ${local_storage_path}"

        # Copy storage files using sudo (files owned by root)
        sudo rsync -a "${local_storage_path}/" "${storage_backup_dir}/" 2>/dev/null || {
            log "WARNING: Failed to copy some storage files"
        }

        # Fix ownership so tar can archive without permission errors
        sudo chown -R "$(whoami):$(whoami)" "${storage_backup_dir}/" 2>/dev/null || true

        # Count and report results
        local file_count
        file_count=$(find "${storage_backup_dir}" -type f 2>/dev/null | wc -l)
        local total_size
        total_size=$(du -sh "${storage_backup_dir}" 2>/dev/null | cut -f1 || echo "0")

        if [ "${file_count}" -gt 0 ]; then
            log "✓ Storage backup completed: ${file_count} files, ${total_size}"
        else
            log "WARNING: Storage path exists but no files found"
        fi
    else
        log "WARNING: Storage path not found: ${local_storage_path}"
    fi
}

# Backup Supabase configuration files
# Includes: .env, docker-compose.yml, docker-compose.override.yml
# Args: $1 - temporary directory for backup assembly
backup_config_files() {
    log "Starting config files backup..."

    local temp_dir="${1}"
    local config_backup_dir="${temp_dir}/config"
    mkdir -p "${config_backup_dir}"

    cd "${SUPABASE_DIR}" || error_exit "Cannot access Supabase directory: ${SUPABASE_DIR}"

    # Backup .env file (contains secrets and configuration)
    if [ -f ".env" ]; then
        cp ".env" "${config_backup_dir}/.env"
        log "Backed up: .env"
    else
        log "WARNING: .env file not found"
    fi

    # Backup docker-compose.yml (base configuration)
    if [ -f "docker-compose.yml" ]; then
        cp "docker-compose.yml" "${config_backup_dir}/docker-compose.yml"
        log "Backed up: docker-compose.yml"
    fi

    # Backup docker-compose.override.yml (custom overrides)
    if [ -f "docker-compose.override.yml" ]; then
        cp "docker-compose.override.yml" "${config_backup_dir}/docker-compose.override.yml"
        log "Backed up: docker-compose.override.yml"
    fi

    log "Config files backup completed"
}
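
# Stack restore sketch (assumption: a clean checkout of the Supabase docker
# setup): copy config/.env and the compose files back into ${SUPABASE_DIR},
# bring the stack up (docker compose up -d), then replay the SQL dumps and
# storage files.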

# Collect system metadata for backup documentation
# Includes: containers, volumes, disk usage, directory structure
# Args: $1 - temporary directory for backup assembly
backup_metadata() {
    log "Collecting metadata..."

    local temp_dir="${1}"
    local metadata_file="${temp_dir}/metadata.txt"

    # Generate metadata report
    {
        echo "=== Supabase Backup Metadata ==="
        echo "Backup Date: $(date)"
        echo "Hostname: $(hostname)"
        echo "Strategy: Variant 1 - Minimal local + Full NAS"
        echo "NAS: ${SYNOLOGY_USER}@${SYNOLOGY_HOST}:${SYNOLOGY_PATH}"
        echo ""
        echo "=== Docker Containers ==="
        docker ps --filter "name=supabase" --format "table {{.Names}}\t{{.Image}}\t{{.Status}}"
        echo ""
        echo "=== Docker Volumes ==="
        docker volume ls --filter "name=supabase" --format "table {{.Name}}\t{{.Driver}}\t{{.Mountpoint}}"
        echo ""
        echo "=== Disk Usage ==="
        df -h "${SUPABASE_DIR}"
        echo ""
        echo "=== Supabase Directory Contents ==="
        ls -lah "${SUPABASE_DIR}"
    } > "${metadata_file}"

    log "Metadata collected"
}

# ===========================
# ARCHIVE OPERATIONS
# ===========================

# Create compressed tar.gz archive from backup directory
# Args: $1 - source directory (temporary backup assembly)
#       $2 - destination file path
create_archive() {
    log "Creating compressed archive..."

    local temp_dir="${1}"
    local backup_dest="${2}"

    # Brief pause to ensure all file operations are complete
    sleep 1

    # Create archive from outside the directory to avoid "file changed" errors
    tar -czf "${backup_dest}" -C "${temp_dir}" . || error_exit "Failed to create archive"

    # Report archive size
    local archive_size
    archive_size=$(du -h "${backup_dest}" | cut -f1)
    log "Archive created: ${BACKUP_NAME} (${archive_size})"
}
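
# Given the assembly steps above, listing the archive should show:
#   tar -tzf "/tmp/${BACKUP_NAME}" | head
#   ./databases/  (postgres.sql.gz, globals.sql.gz, per-database dumps)
#   ./storage/    (bucket files)
#   ./config/     (.env, compose files)
#   ./metadata.txt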

# Verify tar archive integrity
# Args: $1 - path to backup archive
verify_archive() {
    log "Verifying archive integrity..."

    local backup_file="${1}"

    # Test tar file by listing contents (catches corruption)
    if tar -tzf "${backup_file}" > /dev/null 2>&1; then
        log "✓ Archive integrity verified"
    else
        error_exit "Archive integrity check failed"
    fi
}

# Generate SHA256 checksum file
# The checksum records a relative file name so that `sha256sum -c` works from
# any directory the archive/checksum pair is copied to (e.g. the NAS)
# Args: $1 - backup file path
#       $2 - checksum file path
create_checksum() {
    log "Creating SHA256 checksum..."

    local backup_file="${1}"
    local checksum_file="${2}"

    # Generate checksum file (subshell keeps the cd local)
    (cd "$(dirname "${backup_file}")" && sha256sum "$(basename "${backup_file}")") > "${checksum_file}" || {
        log "WARNING: Failed to create checksum"
        return 1
    }

    # Log the checksum value
    local checksum
    checksum=$(awk '{print $1}' "${checksum_file}")
    log "✓ SHA256: ${checksum}"
}
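
# Verification sketch: because the checksum stores a relative name, the pair can
# be verified wherever the two files sit side by side, for example on the NAS
# (assumes sha256sum is available on the target system):
#   cd /volume1/backup/rag/supabase/daily
#   sha256sum -c supabase_<timestamp>.tar.gz.sha256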

# ===========================
# NAS UPLOAD & ROTATION
# ===========================

# Upload backup and checksum to NAS with proper categorization
# Handles: daily/weekly/monthly placement + duplication to daily
# Args: $1 - backup file path
#       $2 - backup type (daily/weekly/monthly)
upload_to_nas() {
    local backup_file="${1}"
    local backup_type="${2}"

    log "Uploading ${backup_type} backup to NAS..."

    # Determine primary destination based on backup type
    local nas_dest
    case "${backup_type}" in
        daily)
            nas_dest="${SYNOLOGY_DAILY}"
            ;;
        weekly)
            nas_dest="${SYNOLOGY_WEEKLY}"
            ;;
        monthly)
            nas_dest="${SYNOLOGY_MONTHLY}"
            ;;
        *)
            error_exit "Unknown backup type: ${backup_type}"
            ;;
    esac

    # Upload backup archive to primary destination
    if ! upload_file_to_nas "${backup_file}" "${nas_dest}"; then
        error_exit "Failed to upload backup to NAS after ${MAX_RETRIES} attempts"
    fi

    log "✓ Uploaded to NAS: ${nas_dest}/$(basename "${backup_file}")"

    # Upload checksum file to primary destination
    local checksum_file="${backup_file}.sha256"
    if [ -f "${checksum_file}" ]; then
        if upload_file_to_nas "${checksum_file}" "${nas_dest}"; then
            log "✓ Checksum uploaded to NAS"
        else
            log "WARNING: Failed to upload checksum file"
        fi
    fi

    # For weekly/monthly backups, also copy to daily folder for quick access
    if [ "${backup_type}" != "daily" ]; then
        # Upload backup archive to daily folder
        if upload_file_to_nas "${backup_file}" "${SYNOLOGY_DAILY}"; then
            log "✓ Backup also saved to daily on NAS"

            # Upload checksum to daily folder too
            if [ -f "${checksum_file}" ]; then
                if upload_file_to_nas "${checksum_file}" "${SYNOLOGY_DAILY}"; then
                    log "✓ Checksum also saved to daily on NAS"
                else
                    log "WARNING: Failed to upload checksum to daily folder"
                fi
            fi
        else
            log "WARNING: Failed to upload backup copy to daily folder"
        fi
    fi
}

# Remove old local backups (keep only latest)
# Retention: 1 backup locally, 30 days of logs
cleanup_local() {
    log "Cleaning up local backups (keeping only latest)..."

    # Count existing local backups
    local backup_count
    backup_count=$(ls -1 "${BACKUP_LOCAL_DAILY}"/*.tar.gz 2>/dev/null | wc -l || echo "0")

    # Remove old backups if more than 1 exists
    if [ "${backup_count}" -gt 1 ]; then
        log "Removing old local backups (keeping only latest)"
        ls -1t "${BACKUP_LOCAL_DAILY}"/*.tar.gz | tail -n +2 | while read -r file; do
            rm -f "${file}"          # Remove backup archive
            rm -f "${file}.sha256"   # Remove checksum file
        done
    fi

    # Remove old log files (older than 30 days)
    find "${BACKUP_LOGS}" -name "backup_*.log" -mtime +30 -delete 2>/dev/null || true

    log "Local cleanup completed"
}

# Rotate old backups on NAS according to retention policy
# Removes: backups exceeding daily/weekly/monthly limits
rotate_nas_backups() {
    log "Rotating backups on NAS..."

    # Rotate daily backups (keep last 7)
    log "Checking daily backups on NAS..."
    local daily_count
    daily_count=$(ssh_nas "ls -1 ${SYNOLOGY_DAILY}/*.tar.gz 2>/dev/null | wc -l" || echo "0")
    if [ "${daily_count}" -gt "${DAILY_KEEP}" ]; then
        log "Removing old daily backups on NAS (keeping ${DAILY_KEEP})"
        ssh_nas "cd ${SYNOLOGY_DAILY} && ls -1t *.tar.gz | tail -n +$((DAILY_KEEP + 1)) | while read f; do rm -f \"\$f\" \"\$f.sha256\"; done"
    fi

    # Rotate weekly backups (keep last 4)
    log "Checking weekly backups on NAS..."
    local weekly_count
    weekly_count=$(ssh_nas "ls -1 ${SYNOLOGY_WEEKLY}/*.tar.gz 2>/dev/null | wc -l" || echo "0")
    if [ "${weekly_count}" -gt "${WEEKLY_KEEP}" ]; then
        log "Removing old weekly backups on NAS (keeping ${WEEKLY_KEEP})"
        ssh_nas "cd ${SYNOLOGY_WEEKLY} && ls -1t *.tar.gz | tail -n +$((WEEKLY_KEEP + 1)) | while read f; do rm -f \"\$f\" \"\$f.sha256\"; done"
    fi

    # Rotate monthly backups (keep last 3)
    log "Checking monthly backups on NAS..."
    local monthly_count
    monthly_count=$(ssh_nas "ls -1 ${SYNOLOGY_MONTHLY}/*.tar.gz 2>/dev/null | wc -l" || echo "0")
    if [ "${monthly_count}" -gt "${MONTHLY_KEEP}" ]; then
        log "Removing old monthly backups on NAS (keeping ${MONTHLY_KEEP})"
        ssh_nas "cd ${SYNOLOGY_MONTHLY} && ls -1t *.tar.gz | tail -n +$((MONTHLY_KEEP + 1)) | while read f; do rm -f \"\$f\" \"\$f.sha256\"; done"
    fi

    log "NAS rotation completed"
}
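
# Resulting NAS layout after several runs (illustrative):
#   /volume1/backup/rag/supabase/
#     daily/    up to 7 x supabase_*.tar.gz (+ .sha256)
#     weekly/   up to 4 x Monday backups
#     monthly/  up to 3 x 1st-of-month backups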

# ===========================
# REPORTING
# ===========================

# Display backup statistics summary
# Shows: local storage status, NAS backup counts and sizes
show_statistics() {
    log "=== Backup Statistics ==="

    # Local storage statistics
    log "Local storage:"
    local latest_local
    latest_local=$(ls -1t "${BACKUP_LOCAL_DAILY}"/*.tar.gz 2>/dev/null | head -1 || true)
    [ -z "${latest_local}" ] && latest_local="none"

    if [ "${latest_local}" != "none" ]; then
        local latest_size
        latest_size=$(du -h "${latest_local}" | cut -f1)
        log "  Latest backup: $(basename "${latest_local}") (${latest_size})"
    else
        log "  Latest backup: none"
    fi
    log "  Local size: $(du -sh "${BACKUP_LOCAL}" 2>/dev/null | cut -f1 || echo '0')"

    # NAS storage statistics
    log "NAS storage:"
    local nas_daily nas_weekly nas_monthly nas_size
    nas_daily=$(ssh_nas "ls -1 ${SYNOLOGY_DAILY}/*.tar.gz 2>/dev/null | wc -l" || echo "0")
    nas_weekly=$(ssh_nas "ls -1 ${SYNOLOGY_WEEKLY}/*.tar.gz 2>/dev/null | wc -l" || echo "0")
    nas_monthly=$(ssh_nas "ls -1 ${SYNOLOGY_MONTHLY}/*.tar.gz 2>/dev/null | wc -l" || echo "0")
    nas_size=$(ssh_nas "du -sh ${SYNOLOGY_PATH} 2>/dev/null | cut -f1" || echo "0")

    log "  Daily:   ${nas_daily} backups"
    log "  Weekly:  ${nas_weekly} backups"
    log "  Monthly: ${nas_monthly} backups"
    log "  Total NAS size: ${nas_size}"
}

# ===========================
# MAIN EXECUTION
# ===========================

# Main backup workflow orchestration
# Executes: checks → backup → archive → upload → cleanup → report
main() {
    log "=== Starting Supabase Backup (Production Version) ==="

    # Phase 1: Pre-flight checks (connectivity before disk space, because the
    # disk space check queries the NAS over SSH)
    check_prerequisites
    check_nas_connectivity
    check_disk_space
    create_dirs

    # Phase 2: Backup assembly
    TEMP_DIR=$(mktemp -d -p /tmp supabase_backup_XXXXXX)
    log "Using temporary directory: ${TEMP_DIR}"

    # Setup cleanup trap (removes temp files on exit); single quotes defer
    # expansion until the trap actually fires
    trap 'sudo rm -rf "${TEMP_DIR}" 2>/dev/null || rm -rf "${TEMP_DIR}"; rm -f /tmp/supabase_*.tar.gz 2>/dev/null' EXIT

    # Perform backups
    backup_postgres "${TEMP_DIR}"
    backup_storage "${TEMP_DIR}"
    backup_config_files "${TEMP_DIR}"
    backup_metadata "${TEMP_DIR}"

    # Phase 3: Archive creation
    TEMP_BACKUP="/tmp/${BACKUP_NAME}"
    create_archive "${TEMP_DIR}" "${TEMP_BACKUP}"
    verify_archive "${TEMP_BACKUP}"

    # Phase 4: Save locally
    cp "${TEMP_BACKUP}" "${BACKUP_LOCAL_DAILY}/${BACKUP_NAME}"
    log "Saved locally: ${BACKUP_LOCAL_DAILY}/${BACKUP_NAME}"

    # Create checksum
    create_checksum "${BACKUP_LOCAL_DAILY}/${BACKUP_NAME}" "${BACKUP_LOCAL_DAILY}/${BACKUP_NAME}.sha256"

    # Phase 5: Determine backup type (daily/weekly/monthly)
    BACKUP_TYPE="daily"

    if [ "${DAY_OF_WEEK}" -eq 1 ]; then
        BACKUP_TYPE="weekly"
        log "Creating weekly backup on NAS"
    fi

    # String comparison: date +%d is zero-padded, so "01" matches exactly
    if [ "${DAY_OF_MONTH}" = "01" ]; then
        BACKUP_TYPE="monthly"
        log "Creating monthly backup on NAS"
    fi

    # Phase 6: Upload to NAS
    upload_to_nas "${BACKUP_LOCAL_DAILY}/${BACKUP_NAME}" "${BACKUP_TYPE}"

    # Phase 7: Cleanup and reporting
    cleanup_local
    rotate_nas_backups
    show_statistics

    log "=== Backup completed successfully ==="
}

# Execute main function with all script arguments
main "$@"