This prompt is designed to generate efficient, reliable database backup and restore scripts. Given the database type and business requirements, it automatically produces a tailored script solution. By analyzing key parameters such as backup frequency and storage location, it ensures the scripts are ready for direct production deployment, protecting data, reducing downtime, simplifying database operations, and improving the overall efficiency and reliability of data protection.
The following is a production-ready MySQL daily full backup and restore solution for Linux hosts, archiving backups to the provided storage location: /mnt/nfs/backup/mysql/prod. Solution highlights:
Run the scripts on the database host under an operations account with the necessary access to MySQL and its data directory.
File: mysql_backup_daily.sh — purpose: run one full logical backup per day (mysqldump), optionally copying closed binlog files for PITR.
#!/usr/bin/env bash
# MySQL daily full backup script (hardened)
# - Target storage location: /mnt/nfs/backup/mysql/prod
# - Credentials are managed via mysql_config_editor --login-path=backup
# - Supports backup verification, logging, concurrency locking, a retention policy, and optional binlog copying
#
# Dependencies: bash, mysql, mysqldump, mysqlbinlog, pigz (or gzip), sha256sum, findmnt
# Platform: Linux
#
# Before first use:
#   mysql_config_editor set --login-path=backup --host=127.0.0.1 --user=backup_user --password
#   # backup_user needs only minimal privileges: SELECT, RELOAD, LOCK TABLES, SHOW VIEW, EVENT, TRIGGER
#   # To copy binlog files, the executing user must be able to read the MySQL datadir
set -Eeuo pipefail
IFS=$'\n\t'
umask 077
############ User-configurable parameters (can also be overridden via environment variables) ############
BACKUP_BASE="${BACKUP_BASE:-/mnt/nfs/backup/mysql/prod}"
LOGIN_PATH="${LOGIN_PATH:-backup}"
RETENTION_DAYS="${RETENTION_DAYS:-14}"            # days to keep backups
COPY_BINLOGS="${COPY_BINLOGS:-true}"              # copy generated binlog files to support PITR
COMPRESSOR="${COMPRESSOR:-pigz}"                  # prefer pigz, fall back to gzip
COMPRESS_LEVEL="${COMPRESS_LEVEL:-6}"             # compression level (1-9)
DUMP_TIMEOUT_SECONDS="${DUMP_TIMEOUT_SECONDS:-0}" # 0 means no timeout
NICE_LEVEL="${NICE_LEVEL:-10}"
IONICE_CLASS="${IONICE_CLASS:-3}"                 # 3 = idle
LOG_DIR="${LOG_DIR:-$BACKUP_BASE}"
LOCK_FILE="${LOCK_FILE:-/var/lock/mysql_backup_daily.lock}"
REQUIRE_NFS="${REQUIRE_NFS:-true}"                # require the backup directory to be an NFS mount
HOST_TAG="${HOST_TAG:-$(hostname -s)}"
###################################################
# Global variables
TIMESTAMP="$(date +'%F_%H%M%S')"
RUN_DIR="$BACKUP_BASE/$TIMESTAMP"
LOG_FILE="$LOG_DIR/backup_${HOST_TAG}_${TIMESTAMP}.log"
META_FILE="$RUN_DIR/metadata.json"
DUMP_FILE="$RUN_DIR/mysqldump_all.sql.gz"
CHECKSUM_FILE="$RUN_DIR/mysqldump_all.sql.gz.sha256"
BINLOG_DIR="$RUN_DIR/binlogs"
# Logging helpers
log() { echo "[$(date +'%F %T')] $*" | tee -a "$LOG_FILE"; }
die() { log "ERROR: $*"; exit 1; }
# Concurrency lock
exec 9>"$LOCK_FILE"
if ! flock -n 9; then
  die "Another backup job is already running (lock file: $LOCK_FILE)"
fi
# Preflight checks
preflight() {
  mkdir -p "$BACKUP_BASE" "$RUN_DIR" "$LOG_DIR"
  touch "$LOG_FILE"
  log "Backup started: $TIMESTAMP, target directory: $RUN_DIR"
  # Verify the backup directory is writable
  [ -w "$BACKUP_BASE" ] || die "Backup directory not writable: $BACKUP_BASE"
  # Verify the directory is an NFS mount (optional)
  if [ "$REQUIRE_NFS" = "true" ]; then
    if command -v findmnt >/dev/null 2>&1; then
      fstype="$(findmnt -T "$BACKUP_BASE" -n -o FSTYPE || true)"
      case "$fstype" in
        nfs|nfs4) log "NFS check passed ($fstype)";;
        *) die "Backup directory is not on an NFS filesystem (detected: $fstype); check the mount or set REQUIRE_NFS=false";;
      esac
    else
      log "WARNING: findmnt not found, skipping NFS check"
    fi
  fi
  # Verify command dependencies
  for c in mysql mysqldump sha256sum; do
    command -v "$c" >/dev/null 2>&1 || die "Command not found: $c"
  done
  if ! command -v "$COMPRESSOR" >/dev/null 2>&1; then
    log "Compressor $COMPRESSOR not found, falling back to gzip"
    COMPRESSOR="gzip"
  fi
  # Test the MySQL connection
  if ! mysql --login-path="$LOGIN_PATH" -NBe "SELECT 1" >/dev/null; then
    die "Cannot connect to MySQL via login path '$LOGIN_PATH'; configure it with mysql_config_editor"
  fi
  # Collect basic metadata
  local version server_uuid gtid_mode log_bin datadir
  version="$(mysql --login-path="$LOGIN_PATH" -NBe "SELECT @@version")"
  server_uuid="$(mysql --login-path="$LOGIN_PATH" -NBe "SELECT @@server_uuid" || echo "")"
  gtid_mode="$(mysql --login-path="$LOGIN_PATH" -NBe "SELECT @@gtid_mode" || echo "OFF")"
  log_bin="$(mysql --login-path="$LOGIN_PATH" -NBe "SELECT @@log_bin" || echo "OFF")"
  datadir="$(mysql --login-path="$LOGIN_PATH" -NBe "SELECT @@datadir" || echo "")"
  # Estimate database size and check free space (rough estimate)
  local est_bytes free_bytes
  est_bytes="$(mysql --login-path="$LOGIN_PATH" -NBe "SELECT COALESCE(SUM(data_length+index_length),0) FROM information_schema.tables")"
  free_bytes="$(df -P --block-size=1 "$BACKUP_BASE" | awk 'NR==2{print $4}')"
  local need_bytes
  need_bytes=$(( (est_bytes * 13) / 10 + 100*1024*1024 )) # estimated compressed size plus headroom: 1.3x + 100MB
  if [ "$free_bytes" -lt "$need_bytes" ]; then
    log "WARNING: estimated free space may be insufficient. Estimated need: $need_bytes bytes, available: $free_bytes bytes. Continuing anyway."
  fi
  # Write the metadata file
  cat > "$META_FILE" <<EOF
{
  "timestamp": "$TIMESTAMP",
  "host": "$HOST_TAG",
  "mysql_version": "$version",
  "server_uuid": "$server_uuid",
  "gtid_mode": "$gtid_mode",
  "log_bin_enabled": "$log_bin",
  "datadir": "$datadir"
}
EOF
  log "Metadata written: $META_FILE"
}
do_dump() {
  log "Starting mysqldump full backup..."
  local dump_cmd=(mysqldump
    --login-path="$LOGIN_PATH"
    --all-databases
    --single-transaction        # consistent InnoDB snapshot
    --quick                     # stream rows to limit memory usage
    --routines --triggers --events
    --master-data=2             # record binlog coordinates as a comment, for PITR (renamed --source-data in MySQL 8.0.26+)
    --flush-logs                # rotate the binlog (if enabled)
    --hex-blob
    --default-character-set=utf8mb4
    --set-gtid-purged=OFF       # suited to restoring into the same instance; see the restore notes for cross-instance/GTID cases
    --column-statistics=0       # compatibility with older clients
    --dump-date
  )
  mkdir -p "$(dirname "$DUMP_FILE")"
  local comp_cmd=("$COMPRESSOR" "-${COMPRESS_LEVEL}")
  local start_ts="$(date +%s)"
  if [ "$DUMP_TIMEOUT_SECONDS" -gt 0 ]; then
    # printf %q re-quotes each argument so it survives inside the bash -c string
    timeout "$DUMP_TIMEOUT_SECONDS" bash -c \
      "ionice -c $IONICE_CLASS nice -n $NICE_LEVEL $(printf '%q ' "${dump_cmd[@]}") 2>>$(printf '%q' "$LOG_FILE") | $(printf '%q ' "${comp_cmd[@]}") >$(printf '%q' "$DUMP_FILE")"
  else
    ionice -c "$IONICE_CLASS" nice -n "$NICE_LEVEL" \
      "${dump_cmd[@]}" 2>>"$LOG_FILE" | "${comp_cmd[@]}" >"$DUMP_FILE"
  fi
  local pipe_status=${PIPESTATUS[*]}
  local end_ts="$(date +%s)"
  log "mysqldump finished (duration=$((end_ts-start_ts))s, pipe_status=$pipe_status)"
  # Basic verification: the dump must contain the "Dump completed on" marker
  if ! zgrep -q "Dump completed on" "$DUMP_FILE"; then
    die "Backup verification failed: mysqldump completion marker not found"
  fi
  # Generate the checksum
  sha256sum "$DUMP_FILE" > "$CHECKSUM_FILE"
  log "Checksum written: $CHECKSUM_FILE"
}
copy_binlogs() {
  if [ "$COPY_BINLOGS" != "true" ]; then
    log "Skipping binlog copy (COPY_BINLOGS=$COPY_BINLOGS)"
    return 0
  fi
  # Check whether binary logging is enabled (SELECT @@log_bin returns 1/0, not ON/OFF)
  local log_bin
  log_bin="$(mysql --login-path="$LOGIN_PATH" -NBe "SELECT @@log_bin" || echo "0")"
  if [ "$log_bin" != "1" ]; then
    log "Binary logging is disabled (@@log_bin=$log_bin), skipping binlog copy"
    return 0
  fi
  log "Copying binlog files to support PITR..."
  mkdir -p "$BINLOG_DIR"
  # Determine datadir and the binlog list
  local datadir; datadir="$(mysql --login-path="$LOGIN_PATH" -NBe "SELECT @@datadir")"
  # The logs were already rotated by mysqldump --flush-logs; the active log is the newest one
  # Copy all existing logs (including older ones); the restore filters by start position
  mysql --login-path="$LOGIN_PATH" -NBe "SHOW BINARY LOGS" | awk '{print $1}' | while read -r f; do
    if [ -f "$datadir/$f" ]; then
      # Compress while copying to save space
      if [ "$COMPRESSOR" = "pigz" ] || [ "$COMPRESSOR" = "gzip" ]; then
        "$COMPRESSOR" "-${COMPRESS_LEVEL}" -c "$datadir/$f" > "$BINLOG_DIR/$f.gz"
      else
        gzip -c "$datadir/$f" > "$BINLOG_DIR/$f.gz"
      fi
      log "Copied binlog: $f -> $BINLOG_DIR/$f.gz"
    else
      log "WARNING: binlog file not found: $datadir/$f (insufficient permissions, or the file was rotated away)"
    fi
  done
}
retention() {
  log "Applying retention policy: keep the last $RETENTION_DAYS days of backups"
  # Only delete dated subdirectories directly under BACKUP_BASE
  find "$BACKUP_BASE" -mindepth 1 -maxdepth 1 -type d -mtime +"$RETENTION_DAYS" -print -exec rm -rf {} \; | tee -a "$LOG_FILE" || true
}
main() {
  preflight
  do_dump
  copy_binlogs
  retention
  log "备份已完成,位置:$RUN_DIR"
}
main
File: mysql_restore.sh — purpose: restore data from a specified backup directory; supports full restore (all-databases) and binary-log replay (PITR) starting from the coordinates recorded in the backup.
#!/usr/bin/env bash
# MySQL restore script (full + optional PITR)
# Connects via a mysql_config_editor login path; no plaintext passwords are stored in the script.
# Notes:
#   1. A full restore imports every database from the backup (including the mysql system schema); run it only on an empty instance or one you have confirmed may be overwritten.
#   2. PITR requires the coordinates written by --master-data=2 plus the corresponding binlog files.
set -Eeuo pipefail
IFS=$'\n\t'
umask 077
############ User-configurable parameters (can also be overridden via environment variables) ############
BACKUP_DIR="${BACKUP_DIR:-}"            # required, e.g. /mnt/nfs/backup/mysql/prod/2025-11-03_020000
LOGIN_PATH="${LOGIN_PATH:-backup}"      # use the same login path as the backup
RESTORE_MODE="${RESTORE_MODE:-full}"    # full or pitr
STOP_DATETIME="${STOP_DATETIME:-}"      # optional for pitr, e.g. '2025-11-03 09:30:00'
FORCE_RESTORE="${FORCE_RESTORE:-false}" # must be true if the target instance is not empty
###################################################
log() { echo "[$(date +'%F %T')] $*"; }
die() { log "ERROR: $*"; exit 1; }
require_cmds() {
  # mysql is mandatory; the others are only needed for specific modes, so just warn
  command -v mysql >/dev/null 2>&1 || die "Command not found: mysql"
  for c in zgrep gzip pigz mysqlbinlog; do
    command -v "$c" >/dev/null 2>&1 || log "WARNING: command not found: $c (required for some restore paths)"
  done
}
preflight() {
  [ -n "$BACKUP_DIR" ] || die "Set BACKUP_DIR, e.g. /mnt/nfs/backup/mysql/prod/2025-11-03_020000"
  [ -d "$BACKUP_DIR" ] || die "Backup directory not found: $BACKUP_DIR"
  [ -r "$BACKUP_DIR/mysqldump_all.sql.gz" ] || die "Backup file not found: $BACKUP_DIR/mysqldump_all.sql.gz"
  [ -r "$BACKUP_DIR/mysqldump_all.sql.gz.sha256" ] || log "WARNING: checksum file not found, skipping verification"
  if ! mysql --login-path="$LOGIN_PATH" -NBe "SELECT 1" >/dev/null; then
    die "Cannot connect to MySQL via login path '$LOGIN_PATH'"
  fi
  # Is the target instance empty? (count of user databases)
  local user_db_count
  user_db_count="$(mysql --login-path="$LOGIN_PATH" -NBe "SELECT COUNT(*) FROM information_schema.schemata WHERE schema_name NOT IN ('mysql','information_schema','performance_schema','sys')")"
  if [ "$user_db_count" -gt 0 ] && [ "$FORCE_RESTORE" != "true" ]; then
    die "Target instance has user databases (count: $user_db_count). To proceed, set FORCE_RESTORE=true and make sure you have a complete overwrite plan."
  fi
  # Verify backup file integrity
  if [ -r "$BACKUP_DIR/mysqldump_all.sql.gz.sha256" ]; then
    (cd "$BACKUP_DIR" && sha256sum -c mysqldump_all.sql.gz.sha256) || die "Backup file checksum verification failed"
    log "Checksum verified"
  fi
}
do_full_restore() {
  log "Starting full restore (all-databases)..."
  # Decompress and import
  if command -v pigz >/dev/null 2>&1; then
    pigz -dc "$BACKUP_DIR/mysqldump_all.sql.gz" | mysql --login-path="$LOGIN_PATH"
  else
    gzip -dc "$BACKUP_DIR/mysqldump_all.sql.gz" | mysql --login-path="$LOGIN_PATH"
  fi
  log "Full restore completed"
  # Optional: basic sanity check (example)
  mysql --login-path="$LOGIN_PATH" -e "SHOW DATABASES"
}
extract_master_info() {
  # Extract MASTER_LOG_FILE and MASTER_LOG_POS from the compressed dump (written as a comment by --master-data=2)
  local master_file master_pos
  master_file="$(zgrep -m1 -E "MASTER_LOG_FILE='[^']+'" "$BACKUP_DIR/mysqldump_all.sql.gz" | sed -E "s/.*MASTER_LOG_FILE='([^']+)'.*/\1/")"
  master_pos="$(zgrep -m1 -E "MASTER_LOG_POS=[0-9]+" "$BACKUP_DIR/mysqldump_all.sql.gz" | sed -E "s/.*MASTER_LOG_POS=([0-9]+).*/\1/")"
  if [ -z "$master_file" ] || [ -z "$master_pos" ]; then
    die "Could not parse MASTER_LOG_FILE/MASTER_LOG_POS from the backup; confirm it was taken with --master-data=2"
  fi
  echo "$master_file $master_pos"
}
do_pitr_restore() {
  log "Starting point-in-time recovery (PITR)..."
  local info; info="$(extract_master_info)"
  local start_file start_pos
  start_file="$(echo "$info" | awk '{print $1}')"
  start_pos="$(echo "$info" | awk '{print $2}')"
  log "Start coordinates: file=$start_file, pos=$start_pos"
  # binlog files should be in $BACKUP_DIR/binlogs (copied by the backup script)
  local binlog_path="$BACKUP_DIR/binlogs"
  [ -d "$binlog_path" ] || die "binlog directory not found: $binlog_path (the backup must run with COPY_BINLOGS=true)"
  # List the available binlogs; replay the start file and everything after it, in order
  local files=()
  while IFS= read -r f; do files+=("$f"); done < <(ls -1 "$binlog_path"/*.gz | sort)
  # Confirm the start file exists
  local start_full="$binlog_path/$start_file.gz"
  [ -f "$start_full" ] || die "Start binlog file not found: $start_full"
  # Build mysqlbinlog arguments: --start-position applies only to the first file,
  # while --stop-datetime must apply to every file
  local start_args=(--start-position="$start_pos")
  local stop_args=()
  if [ -n "$STOP_DATETIME" ]; then
    stop_args+=(--stop-datetime="$STOP_DATETIME")
  fi
  # Replay from the start file onwards
  local begin_replay=false
  for f in "${files[@]}"; do
    if [ "$f" = "$start_full" ]; then
      begin_replay=true
    fi
    if [ "$begin_replay" = "true" ]; then
      log "Replaying: $f"
      # Decompress to a pipe -> mysqlbinlog -> mysql
      gzip -dc "$f" | mysqlbinlog ${start_args[@]+"${start_args[@]}"} ${stop_args[@]+"${stop_args[@]}"} - | mysql --login-path="$LOGIN_PATH"
      # Later files are replayed from their beginning
      start_args=()
    fi
  done
  log "PITR completed (up to: ${STOP_DATETIME:-all available logs})"
}
main() {
  require_cmds
  preflight
  case "$RESTORE_MODE" in
    full) do_full_restore ;;
    pitr) do_pitr_restore ;;
    *) die "RESTORE_MODE 必须为 full 或 pitr" ;;
  esac
  log "恢复流程结束"
}
main
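A usage sketch (the backup-directory timestamp is illustrative; all options are passed as environment variables defined at the top of the script):
# Full restore into an empty instance
BACKUP_DIR=/mnt/nfs/backup/mysql/prod/2025-11-03_020000 RESTORE_MODE=full ./mysql_restore.sh
# PITR: replay binlogs from the dump's recorded coordinates up to a cut-off time
BACKUP_DIR=/mnt/nfs/backup/mysql/prod/2025-11-03_020000 RESTORE_MODE=pitr STOP_DATETIME='2025-11-03 09:30:00' ./mysql_restore.sh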
Adjust the following parameters to your environment (both scripts expose them at the top and accept environment-variable overrides):
Usage guide topics:
- Permissions and account recommendations
- Credential preparation (secure, via mysql_config_editor)
- Deploying the backup script (see the cron sketch after this list)
- Using the restore script
- Backup verification
- Security recommendations
Troubleshooting topics:
- Cannot connect to MySQL (wrong login path)
- Backup directory not on NFS or not writable
- Backup failure or checksum verification failure
- binlogs not copied, or PITR failure
- Restore caveats in GTID environments
- False positives from the concurrency lock
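For deployment, a minimal cron sketch, assuming the script is installed at /usr/local/bin/mysql_backup_daily.sh and a 02:00 maintenance window (both the path and the schedule are assumptions to adapt):
# /etc/cron.d/mysql_backup -- daily full backup at 02:00 as the ops account (illustrative)
0 2 * * * opsuser /usr/local/bin/mysql_backup_daily.sh >> /var/log/mysql_backup_cron.log 2>&1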
For larger deployments (e.g. physical hot backups with Percona XtraBackup, parallel compression, per-schema concurrent dumps, or snapshot-based backups), or to design multi-site disaster recovery and restore-drill procedures, share your instance size, storage engine, peak write rate, and recovery objectives (RTO/RPO) for a further customized plan.
The following solution targets SQL Server environments and provides secure, auditable, production-ready backup and restore scripts that meet the following goals:
Suggested schedule: differential backups every hour plus a full backup every day (schedule the two separately; see the scheduled-task sketch after the backup script).
Below is the complete PowerShell backup script. It depends on the SqlServer PowerShell module (Invoke-Sqlcmd) and connects to SQL Server with Windows authentication. The script backs up every online, non-read-only user database, performing a differential backup by default with a parameter to switch to a full backup. Backup files are encrypted with a certificate, compressed, and verified after completion.
<# 
.SYNOPSIS
  SQL Server secure backup script (full/differential) with encryption, verification, logging, and retention.
.DESCRIPTION
  - Intended cadence: differential backups hourly, full backups daily (schedule separately).
  - Encrypts backups with a server certificate (AES_256); unencrypted backups are refused unless explicitly allowed.
  - Backs up all online, non-read-only user databases, writes a log, and prunes expired backups.
  - Runs RESTORE VERIFYONLY after each backup.
.PARAMETER Instance
  SQL Server instance name (default: localhost)
.PARAMETER BackupRoot
  Backup root directory (UNC path)
.PARAMETER BackupType
  Full or Differential (default: Differential)
.PARAMETER RetentionDays
  Days to keep backups (default: 7)
.PARAMETER CertificateName
  Name of the backup encryption certificate (default: BackupCert)
.PARAMETER AllowUnencrypted
  Allow an unencrypted backup when the certificate is missing (disallowed by default)
.PARAMETER CommandTimeoutSeconds
  T-SQL command timeout in seconds (default: 3600)
.NOTES
  Requires the SqlServer PowerShell module; the SQL Server service account needs write access to the UNC path.
#>
[CmdletBinding()]
param(
  [string]$Instance = "localhost",
  [Parameter(Mandatory=$true)]
  [string]$BackupRoot,
  [ValidateSet("Full","Differential")]
  [string]$BackupType = "Differential",
  [int]$RetentionDays = 7,
  [ValidatePattern("^[A-Za-z0-9_]+$")]
  [string]$CertificateName = "BackupCert",
  [switch]$AllowUnencrypted,
  [int]$CommandTimeoutSeconds = 3600
)
Set-StrictMode -Version Latest
$ErrorActionPreference = 'Stop'
# Check for and import the SqlServer module
if (-not (Get-Module -ListAvailable -Name SqlServer)) {
  throw "The SqlServer PowerShell module is missing. Install it first: Install-Module -Name SqlServer -Scope AllUsers"
}
Import-Module SqlServer | Out-Null
# Log directory and file
$LogDir = Join-Path $env:ProgramData "SQLBackup\Logs"
if (-not (Test-Path $LogDir)) { New-Item -ItemType Directory -Path $LogDir | Out-Null }
$timestamp = (Get-Date).ToString("yyyyMMdd_HHmmss")
$LogFile = Join-Path $LogDir "Backup_${BackupType}_${timestamp}.log"
function Write-Log([string]$msg) {
  $line = "[{0}] {1}" -f (Get-Date).ToString("yyyy-MM-dd HH:mm:ss"), $msg
  $line | Tee-Object -FilePath $LogFile -Append
}
Write-Log "Backup starting. Instance: $Instance, type: $BackupType, root: $BackupRoot"
# Path check (reachability from this host); note: the SQL Server service account must be able to write to this path
if (-not (Test-Path $BackupRoot)) {
  throw "Backup root not reachable: $BackupRoot. Check the network share and permissions."
}
# Check that the backup encryption certificate exists
$certExists = $false
try {
  $certExists = [bool](Invoke-Sqlcmd -ServerInstance $Instance -Query @"
SELECT CASE WHEN EXISTS(SELECT 1 FROM master.sys.certificates WHERE name = N'$CertificateName') THEN 1 ELSE 0 END AS CertExists;
"@ -QueryTimeout $CommandTimeoutSeconds).CertExists
} catch {
  throw "Could not query certificate status: $($_.Exception.Message)"
}
if (-not $certExists -and -not $AllowUnencrypted) {
  throw "Backup encryption certificate [$CertificateName] not found. For safety, the script refuses to run an unencrypted backup. Create the certificate first (see the usage guide), or pass -AllowUnencrypted to permit an unencrypted backup temporarily."
}
# Get the list of user databases
$databases = Invoke-Sqlcmd -ServerInstance $Instance -Query @"
SELECT name
FROM sys.databases
WHERE database_id > 4
  AND state_desc = 'ONLINE'
  AND is_read_only = 0;
"@ -QueryTimeout $CommandTimeoutSeconds | Select-Object -ExpandProperty name
if (-not $databases -or $databases.Count -eq 0) {
  Write-Log "No user databases eligible for backup. Exiting."
  exit 0
}
Write-Log ("Databases to back up: {0}" -f ($databases -join ", "))
$hadError = $false
foreach ($db in $databases) {
  try {
    $dbDir = Join-Path $BackupRoot $db
    if (-not (Test-Path $dbDir)) { New-Item -ItemType Directory -Path $dbDir | Out-Null }
    $fileStamp = (Get-Date).ToString("yyyyMMddHHmm")
    $fileName = "{0}_{1}_{2}.bak" -f $db, $BackupType.ToUpper(), $fileStamp
    $filePath = Join-Path $dbDir $fileName
    Write-Log "Backing up database [$db] to $filePath"
    $encClause = ""
    if ($certExists) {
      $encClause = ", ENCRYPTION(ALGORITHM = AES_256, SERVER CERTIFICATE = [$CertificateName])"
    } elseif ($AllowUnencrypted) {
      Write-Log "WARNING: certificate missing; running an UNENCRYPTED backup as instructed."
    }
    # if/else instead of the ternary operator keeps this compatible with Windows PowerShell 5.1
    if ($BackupType -eq "Differential") { $diffClause = "DIFFERENTIAL, " } else { $diffClause = "" }
    $backupTsql = @"
BACKUP DATABASE [$db]
TO DISK = N'$filePath'
WITH INIT, $diffClause COMPRESSION, CHECKSUM, STATS = 10 $encClause;
"@
    Invoke-Sqlcmd -ServerInstance $Instance -Query $backupTsql -QueryTimeout $CommandTimeoutSeconds | Out-Null
    Write-Log "Backup finished; verifying: RESTORE VERIFYONLY [$db]"
    $verifyTsql = "RESTORE VERIFYONLY FROM DISK = N'$filePath' WITH CHECKSUM;"
    Invoke-Sqlcmd -ServerInstance $Instance -Query $verifyTsql -QueryTimeout $CommandTimeoutSeconds | Out-Null
    Write-Log "Verification succeeded: [$db] -> $fileName"
    # Retention cleanup
    $cutoff = (Get-Date).AddDays(-$RetentionDays)
    Get-ChildItem -Path $dbDir -Filter "*.bak" -File |
      Where-Object { $_.LastWriteTime -lt $cutoff } |
      ForEach-Object {
        Write-Log "Removing expired backup file: $($_.FullName)"
        Remove-Item -Path $_.FullName -Force -ErrorAction Stop
      }
  } catch {
    Write-Log "ERROR: backup of database [$db] failed. Reason: $($_.Exception.Message)"
    # Continue with the remaining databases, but remember the failure for the exit code
    $hadError = $true
  }
}
Write-Log "Backup run complete. Log file: $LogFile"
if ($hadError) { exit 1 }
The following PowerShell script automatically picks the latest full backup plus the latest differential backup of a given database and restores them. It supports moving data/log files to specified directories, verification, and replacing an existing database.
<# 
.SYNOPSIS
  SQL Server secure restore script: automatically selects the latest FULL + DIFF from a directory and restores them.
.PARAMETER Instance
  Target SQL Server instance name (default: localhost)
.PARAMETER BackupRoot
  Backup root directory (UNC path)
.PARAMETER Database
  Source database name to restore (matches the backup subdirectory name)
.PARAMETER TargetDatabase
  Target database name to restore into (defaults to the source name)
.PARAMETER DataDir
  Target directory for data files (MDF/NDF, optional)
.PARAMETER LogDir
  Target directory for log files (LDF, optional)
.PARAMETER WithReplace
  Allow overwriting an existing target database
.PARAMETER VerifyOnly
  Only verify backup usability; do not actually restore
.PARAMETER CommandTimeoutSeconds
  T-SQL command timeout in seconds (default: 7200)
.NOTES
  Covers FULL + DIFF backup chains only; point-in-time restore from log backups is not included.
#>
[CmdletBinding()]
param(
  [string]$Instance = "localhost",
  [Parameter(Mandatory=$true)]
  [string]$BackupRoot,
  [Parameter(Mandatory=$true)]
  [ValidatePattern("^[A-Za-z0-9_\-]+$")]
  [string]$Database,
  [ValidatePattern("^[A-Za-z0-9_\-]+$")]
  [string]$TargetDatabase,
  [string]$DataDir,
  [string]$LogDir,
  [switch]$WithReplace,
  [switch]$VerifyOnly,
  [int]$CommandTimeoutSeconds = 7200
)
Set-StrictMode -Version Latest
$ErrorActionPreference = 'Stop'
if (-not (Get-Module -ListAvailable -Name SqlServer)) {
  throw "The SqlServer PowerShell module is missing. Install it first: Install-Module -Name SqlServer -Scope AllUsers"
}
Import-Module SqlServer | Out-Null
if (-not $TargetDatabase) { $TargetDatabase = $Database }
$srcDir = Join-Path $BackupRoot $Database
if (-not (Test-Path $srcDir)) { throw "Backup directory not found: $srcDir" }
# Find the latest FULL and DIFF
$full = Get-ChildItem -Path $srcDir -Filter "${Database}_FULL_*.bak" -File | Sort-Object LastWriteTime -Descending | Select-Object -First 1
if (-not $full) { throw "No full backup file found: ${Database}_FULL_*.bak" }
$fullTime = $full.LastWriteTime
$diff = Get-ChildItem -Path $srcDir -Filter "${Database}_DIFFERENTIAL_*.bak" -File |
        Where-Object { $_.LastWriteTime -ge $fullTime } |
        Sort-Object LastWriteTime -Descending | Select-Object -First 1
if ($VerifyOnly) {
  Write-Host "Verifying backup files (VERIFYONLY)..."
  Invoke-Sqlcmd -ServerInstance $Instance -Query "RESTORE VERIFYONLY FROM DISK = N'$($full.FullName)' WITH CHECKSUM;" -QueryTimeout $CommandTimeoutSeconds | Out-Null
  if ($diff) {
    Invoke-Sqlcmd -ServerInstance $Instance -Query "RESTORE VERIFYONLY FROM DISK = N'$($diff.FullName)' WITH CHECKSUM;" -QueryTimeout $CommandTimeoutSeconds | Out-Null
  }
  Write-Host "Verification succeeded. FULL: $($full.Name) $(if ($diff) {"; DIFF: $($diff.Name)"} else {""})"
  exit 0
}
# Read logical file names and build MOVE clauses (if target paths were given)
$files = Invoke-Sqlcmd -ServerInstance $Instance -Query "RESTORE FILELISTONLY FROM DISK = N'$($full.FullName)';" -QueryTimeout $CommandTimeoutSeconds
if (-not $files) { throw "Could not read the logical file list from backup: $($full.FullName)" }
function Build-MoveClauses {
  param([System.Data.DataRow[]]$fileRows, [string]$dataDir, [string]$logDir)
  $clauses = @()
  foreach ($row in $fileRows) {
    $logical = $row.LogicalName
    $type = $row.Type # D (data) or L (log)
    $origPhys = $row.PhysicalName
    $targetPath = $origPhys
    if ($type -eq 'D' -and $dataDir) {
      $baseName = Split-Path $origPhys -Leaf
      $targetPath = Join-Path $dataDir $baseName
    } elseif ($type -eq 'L' -and $logDir) {
      $baseName = Split-Path $origPhys -Leaf
      $targetPath = Join-Path $logDir $baseName
    }
    $clauses += "MOVE N'$logical' TO N'$targetPath'"
  }
  return $clauses -join ", "
}
$moveClause = Build-MoveClauses -fileRows $files -dataDir $DataDir -logDir $LogDir
# if/else instead of the ternary operator keeps this compatible with Windows PowerShell 5.1
if ($WithReplace.IsPresent) { $replaceClause = ", REPLACE" } else { $replaceClause = "" }
# If the target database exists and replacement was not requested, stop here
$exists = [bool](Invoke-Sqlcmd -ServerInstance $Instance -Query "SELECT CASE WHEN DB_ID(N'$TargetDatabase') IS NULL THEN 0 ELSE 1 END AS ExistsDB;" -QueryTimeout $CommandTimeoutSeconds).ExistsDB
if ($exists -and -not $WithReplace) {
  throw "Target database [$TargetDatabase] already exists. Add -WithReplace to overwrite it."
}
# Restore: FULL first (NORECOVERY), then DIFF (RECOVERY)
$restoreFull = @"
RESTORE DATABASE [$TargetDatabase]
FROM DISK = N'$($full.FullName)'
WITH NORECOVERY, CHECKSUM, STATS = 10 $replaceClause $(if ($moveClause) {", $moveClause"} else {""});
"@
Invoke-Sqlcmd -ServerInstance $Instance -Query $restoreFull -QueryTimeout $CommandTimeoutSeconds | Out-Null
if ($diff) {
  $restoreDiff = @"
RESTORE DATABASE [$TargetDatabase]
FROM DISK = N'$($diff.FullName)'
WITH RECOVERY, CHECKSUM, STATS = 10;
"@
  Invoke-Sqlcmd -ServerInstance $Instance -Query $restoreDiff -QueryTimeout $CommandTimeoutSeconds | Out-Null
  Write-Host "Restore complete: FULL + DIFF applied. Target database: $TargetDatabase"
} else {
  # No differential available: just bring the database online
  $recover = "RESTORE DATABASE [$TargetDatabase] WITH RECOVERY;"
  Invoke-Sqlcmd -ServerInstance $Instance -Query $recover -QueryTimeout $CommandTimeoutSeconds | Out-Null
  Write-Host "Restore complete: full backup only. Target database: $TargetDatabase"
}
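A usage sketch, assuming the restore script above is saved as Restore-SqlServer.ps1 (the instance, share, and directories below are illustrative):
# Verify the latest FULL + DIFF chain without restoring
.\Restore-SqlServer.ps1 -Instance "SQLPROD01" -BackupRoot "\\backupsrv\sql" -Database "Sales" -VerifyOnly
# Restore into a drill database, relocating data and log files (hypothetical paths)
.\Restore-SqlServer.ps1 -Instance "SQLPROD01" -BackupRoot "\\backupsrv\sql" -Database "Sales" -TargetDatabase "Sales_Drill" -DataDir "D:\SQLData" -LogDir "E:\SQLLogs" -WithReplace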
Usage guide topics:
- Preparation
- First run (manual test)
- Scheduling recommendations (see the scheduled-task sketch after the backup script above)
- Restore operations (drills and production)
- Backup verification and auditing
Troubleshooting topics:
- Share permission errors (OS error 5/64)
- Certificate missing or master key not open (see the T-SQL sketch after this list)
- Differential backup cannot be restored (latest full backup missing)
- SqlServer PowerShell module missing
- Insufficient disk space
- Restore file path conflicts
- Performance and the backup window
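For the certificate-missing case, a minimal T-SQL sketch for creating the master key and the certificate the backup script expects (passwords and file paths are placeholders; export the certificate and key immediately, since encrypted backups cannot be restored elsewhere without them):
USE master;
-- Create the database master key (skip if one already exists)
CREATE MASTER KEY ENCRYPTION BY PASSWORD = '<StrongPasswordHere>';
-- Create the certificate referenced via -CertificateName (default BackupCert)
CREATE CERTIFICATE BackupCert WITH SUBJECT = 'SQL backup encryption certificate';
-- Export the certificate and private key to a safe location
BACKUP CERTIFICATE BackupCert
  TO FILE = 'C:\Secure\BackupCert.cer'
  WITH PRIVATE KEY (FILE = 'C:\Secure\BackupCert.pvk', ENCRYPTION BY PASSWORD = '<AnotherStrongPassword>');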
Note: for a lower RPO (under 1 hour), increase the transaction-log backup frequency (e.g. every 15 minutes) and apply the log backups during restore. That goes beyond the current hourly-differential scope, but the scripts can be extended accordingly.
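As an extension sketch, a T-SQL statement like the following, scheduled every 15 minutes against databases in the FULL recovery model (the database name and path are illustrative), adds log backups to the chain:
BACKUP LOG [Sales]
TO DISK = N'\\backupsrv\sql\Sales\Sales_LOG_202511030915.trn'
WITH INIT, COMPRESSION, CHECKSUM;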
This solution targets PostgreSQL production environments: a directly deployable set of secure backup and restore scripts implementing weekly full physical backups plus optional WAL archiving (enabling PITR), with backups stored in S3 (s3://ops-backup/pg/prod/eu-central). It has the following properties:
Notes:
The material below comes in two parts: the weekly base-backup script and an optional WAL-archiving helper.
Save the scripts to a path readable by root and by postgres (/usr/local/bin is recommended) and set the permissions accordingly.
#!/usr/bin/env bash
# PostgreSQL weekly base backup to S3 with verification and logging
# Requires: pg_basebackup, awscli v2, gzip (or pigz optional), sha256sum
# Safe by design: no plaintext passwords; use .pgpass with 0600 permissions or PGPASSFILE env.
set -Eeuo pipefail
umask 027
# ===== User Config (override via env or edit here) =====
PGHOST="${PGHOST:-127.0.0.1}"
PGPORT="${PGPORT:-5432}"
PGUSER="${PGUSER:-replicator}"            # must have REPLICATION privilege
PGPASSFILE="${PGPASSFILE:-/var/lib/postgresql/.pgpass}"  # readable only by owner
# S3 base URI (from user input)
S3_BASE_URI="${S3_BASE_URI:-s3://ops-backup/pg/prod/eu-central}"
# Instance identifier (used in S3 path)
INSTANCE_NAME="${INSTANCE_NAME:-$(hostname -s)}"
# Encryption: "AES256" for SSE-S3 or "aws:kms" for SSE-KMS
AWS_SSE_MODE="${AWS_SSE_MODE:-AES256}"
AWS_KMS_KEY_ID="${AWS_KMS_KEY_ID:-}"   # required if AWS_SSE_MODE=aws:kms
# Work and log dirs (must be writable by the executing user, typically 'postgres')
WORK_DIR="${WORK_DIR:-/var/lib/pg_backup/work}"
LOG_DIR="${LOG_DIR:-/var/log/pg_backup}"
# Retention: delegate to S3 lifecycle. Optional local deletion disabled by default.
ENABLE_LOCAL_RETENTION="${ENABLE_LOCAL_RETENTION:-false}"  # true/false
RETENTION_FULL_WEEKS="${RETENTION_FULL_WEEKS:-4}"
# Compression: prefer pigz if available
COMPRESSOR="$(command -v pigz || echo "$(command -v gzip)")"
COMPRESS_LEVEL="${COMPRESS_LEVEL:--9}"
# ===== End User Config =====
# Derived paths
BACKUP_LABEL="weekly-$(date -u +%Y%m%dT%H%M%SZ)"
RUN_DIR="${WORK_DIR}/run-${BACKUP_LABEL}"
BACKUP_DIR="${RUN_DIR}/backup"               # pg_basebackup output (plain format)
ARCHIVE_NAME="base-${BACKUP_LABEL}.tar.gz"
ARCHIVE_PATH="${RUN_DIR}/${ARCHIVE_NAME}"
SHA256_PATH="${ARCHIVE_PATH}.sha256"
S3_PREFIX="${S3_BASE_URI}/base/${INSTANCE_NAME}/${BACKUP_LABEL}"  # destination path
mkdir -p "${RUN_DIR}" "${LOG_DIR}"
LOG_FILE="${LOG_DIR}/weekly_backup.log"
exec > >(tee -a "${LOG_FILE}") 2>&1
cleanup() {
  echo "[INFO] Cleanup temp dir ${RUN_DIR}"
  rm -rf "${RUN_DIR}" || true
}
trap cleanup EXIT
require_bin() {
  command -v "$1" >/dev/null 2>&1 || { echo "[ERROR] Missing binary: $1"; exit 1; }
}
echo "========== PostgreSQL Weekly Backup Started at $(date -u +'%F %T %Z') =========="
# Check required commands
for b in pg_basebackup aws "${COMPRESSOR##*/}" sha256sum tar; do
  require_bin "${b}"
done
# Check .pgpass
if [[ ! -r "${PGPASSFILE}" ]]; then
  echo "[ERROR] PGPASSFILE not readable: ${PGPASSFILE}"
  exit 1
fi
# Cross-platform permission check: GNU stat (-c) first, BSD stat (-f) as fallback
PGPASS_PERMS="$(stat -c '%a' "${PGPASSFILE}" 2>/dev/null || stat -f '%Lp' "${PGPASSFILE}" 2>/dev/null || echo 'unknown')"
if [[ "${PGPASS_PERMS}" != "600" ]]; then
  echo "[ERROR] ${PGPASSFILE} must have permission 600 (found: ${PGPASS_PERMS})"
  exit 1
fi
export PGPASSFILE
# Build AWS SSE parameters
AWS_SSE_ARGS=(--sse "${AWS_SSE_MODE}")
if [[ "${AWS_SSE_MODE}" == "aws:kms" ]]; then
  if [[ -z "${AWS_KMS_KEY_ID}" ]]; then
    echo "[ERROR] AWS_SSE_MODE=aws:kms but AWS_KMS_KEY_ID is empty"
    exit 1
  fi
  AWS_SSE_ARGS+=(--sse-kms-key-id "${AWS_KMS_KEY_ID}")
fi
# Run pg_basebackup (plain directory), include required WAL to make backup consistent
echo "[INFO] Running pg_basebackup with label ${BACKUP_LABEL}"
pg_basebackup \
  -h "${PGHOST}" -p "${PGPORT}" -U "${PGUSER}" \
  -D "${BACKUP_DIR}" \
  -Fp -X stream -c fast -l "${BACKUP_LABEL}" -P
# Verify backup integrity if tool available (PG13+)
if command -v pg_verifybackup >/dev/null 2>&1; then
  echo "[INFO] Verifying backup with pg_verifybackup"
  pg_verifybackup "${BACKUP_DIR}"
else
  echo "[WARN] pg_verifybackup not found; skipping manifest verification."
fi
# Tar and compress
echo "[INFO] Archiving and compressing backup directory"
if [[ -n "${COMPRESSOR}" ]]; then
  # tar -I uses the compressor command directly
  tar -C "${RUN_DIR}" -I "${COMPRESSOR} ${COMPRESS_LEVEL}" -cf "${ARCHIVE_PATH}" "backup"
else
  echo "[ERROR] No compressor found (gzip/pigz)"
  exit 1
fi
# Checksums
sha256sum "${ARCHIVE_PATH}" > "${SHA256_PATH}"
# Upload to S3
echo "[INFO] Uploading to S3: ${S3_PREFIX}"
aws s3 cp --only-show-errors "${ARCHIVE_PATH}" "${S3_PREFIX}/${ARCHIVE_NAME}" "${AWS_SSE_ARGS[@]}"
aws s3 cp --only-show-errors "${SHA256_PATH}" "${S3_PREFIX}/${ARCHIVE_NAME}.sha256"
# Upload backup_manifest if exists (inside directory backup/backup_manifest)
if [[ -f "${BACKUP_DIR}/backup_manifest" ]]; then
  aws s3 cp --only-show-errors "${BACKUP_DIR}/backup_manifest" "${S3_PREFIX}/backup_manifest"
fi
# Optional: minimal metadata
META_JSON="${RUN_DIR}/metadata.json"
{
  echo "{"
  echo "  \"instance\": \"${INSTANCE_NAME}\","
  echo "  \"label\": \"${BACKUP_LABEL}\","
  echo "  \"created_at_utc\": \"$(date -u +%Y-%m-%dT%H:%M:%SZ)\","
  echo "  \"pg_host\": \"${PGHOST}\","
  echo "  \"pg_port\": \"${PGPORT}\""
  echo "}"
} > "${META_JSON}"
aws s3 cp --only-show-errors "${META_JSON}" "${S3_PREFIX}/metadata.json" "${AWS_SSE_ARGS[@]}"
# Retention is delegated to S3 lifecycle rules (RETENTION_FULL_WEEKS documents the intended window);
# script-side deletion is intentionally not implemented.
if [[ "${ENABLE_LOCAL_RETENTION}" == "true" ]]; then
  echo "[INFO] ENABLE_LOCAL_RETENTION=true, but retention is managed via S3 lifecycle; no script-side deletion is performed."
fi
echo "[INFO] Backup completed successfully: ${S3_PREFIX}/${ARCHIVE_NAME}"
echo "========== PostgreSQL Weekly Backup Finished at $(date -u +'%F %T %Z') =========="
Enable this part only when point-in-time recovery (PITR) is required. Hook it into postgresql.conf as the archive_command (see the settings sketch after the script).
#!/usr/bin/env bash
# WAL archive command helper: upload WAL to S3 atomically with verification.
# Usage (from postgresql.conf):
# archive_command = '/usr/local/bin/wal_archive.sh "%p" "%f"'
# Requires: awscli v2
set -Eeuo pipefail
SRC_PATH="$1"      # e.g., /var/lib/postgresql/14/main/pg_wal/0000000100000000000000AB
WAL_NAME="$2"      # e.g., 0000000100000000000000AB
# ===== User Config (must match backup settings) =====
S3_BASE_URI="${S3_BASE_URI:-s3://ops-backup/pg/prod/eu-central}"
INSTANCE_NAME="${INSTANCE_NAME:-$(hostname -s)}"
WAL_S3_PREFIX="${WAL_S3_PREFIX:-${S3_BASE_URI}/wal/${INSTANCE_NAME}}"
AWS_SSE_MODE="${AWS_SSE_MODE:-AES256}"
AWS_KMS_KEY_ID="${AWS_KMS_KEY_ID:-}"
# ===== End Config =====
if [[ -z "${SRC_PATH}" || -z "${WAL_NAME}" ]]; then
  echo "[wal_archive] Missing args" >&2
  exit 1
fi
# Build SSE args
AWS_SSE_ARGS=(--sse "${AWS_SSE_MODE}")
if [[ "${AWS_SSE_MODE}" == "aws:kms" ]]; then
  if [[ -z "${AWS_KMS_KEY_ID}" ]]; then
    echo "[wal_archive] AWS_KMS_KEY_ID required for KMS" >&2
    exit 1
  fi
  AWS_SSE_ARGS+=(--sse-kms-key-id "${AWS_KMS_KEY_ID}")
fi
DEST_URI="${WAL_S3_PREFIX}/${WAL_NAME}"
# Upload WAL segment
aws s3 cp --only-show-errors "${SRC_PATH}" "${DEST_URI}" "${AWS_SSE_ARGS[@]}"
# Verify size matches
# parse s3 uri into bucket and key
parse_s3() {
  local uri="$1"
  uri="${uri#s3://}"
  local bucket="${uri%%/*}"
  local key="${uri#${bucket}/}"
  echo "${bucket}" "${key}"
}
read -r BUCKET KEY <<<"$(parse_s3 "${DEST_URI}")"
LOCAL_SIZE=$(stat -c%s "${SRC_PATH}" 2>/dev/null || stat -f%z "${SRC_PATH}")
REMOTE_SIZE=$(aws s3api head-object --bucket "${BUCKET}" --key "${KEY}" --query 'ContentLength' --output text)
if [[ "${LOCAL_SIZE}" != "${REMOTE_SIZE}" ]]; then
  echo "[wal_archive] Size mismatch local=${LOCAL_SIZE} remote=${REMOTE_SIZE}" >&2
  exit 1
fi
exit 0
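For reference, the matching postgresql.conf settings would look roughly like this (a restart is required after enabling archive_mode; the archive_timeout value is an assumption):
# postgresql.conf -- minimal archiving settings (sketch)
wal_level = replica
archive_mode = on
archive_command = '/usr/local/bin/wal_archive.sh "%p" "%f"'
archive_timeout = 300   # force a segment switch at least every 5 minutes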
The restore tooling consists of two parts:
Run them on the restore host, preferably as the postgres user (or via sudo -u postgres).
#!/usr/bin/env bash
# Restore PostgreSQL from S3 base backup, optionally with PITR if WAL archived.
# Run as 'postgres' user on the target host.
set -Eeuo pipefail
# ===== User Config =====
S3_BASE_URI="${S3_BASE_URI:-s3://ops-backup/pg/prod/eu-central}"
INSTANCE_NAME="${INSTANCE_NAME:-$(hostname -s)}"
PGDATA="${PGDATA:-/var/lib/postgresql/data}"        # target data directory
PGUSER_SYS="${PGUSER_SYS:-postgres}"                # system user owning PGDATA
# Recovery target: "immediate" or a timestamp like "2025-11-02 13:15:00+00"
RECOVERY_TARGET="${RECOVERY_TARGET:-immediate}"
# If empty, script will auto-select the latest backup under base/INSTANCE_NAME/
BACKUP_LABEL="${BACKUP_LABEL:-}"
# WAL prefix (must match wal_archive.sh setting if PITR needed)
WAL_S3_PREFIX="${WAL_S3_PREFIX:-${S3_BASE_URI}/wal/${INSTANCE_NAME}}"
# Temp dir for download
WORK_DIR="${WORK_DIR:-/var/lib/pg_restore/work}"
# ===== End Config =====
require_bin() { command -v "$1" >/dev/null 2>&1 || { echo "[ERROR] Missing binary: $1"; exit 1; }; }
for b in aws tar sha256sum; do require_bin "$b"; done
if [[ "$(id -un)" != "${PGUSER_SYS}" ]]; then
  echo "[ERROR] Please run as '${PGUSER_SYS}' (e.g., sudo -u ${PGUSER_SYS} $0)" >&2
  exit 1
fi
mkdir -p "${WORK_DIR}"
# Determine backup label if not provided (pick latest)
if [[ -z "${BACKUP_LABEL}" ]]; then
  echo "[INFO] Finding latest backup label from S3"
  # List prefixes under base/INSTANCE_NAME/, last line assumed latest due to lexicographic time label
  LIST_OUT=$(aws s3 ls "${S3_BASE_URI}/base/${INSTANCE_NAME}/" | awk '{print $2}' | sed 's#/##' | sort | tail -n 1 || true)
  if [[ -z "${LIST_OUT}" ]]; then
    echo "[ERROR] No backups found in ${S3_BASE_URI}/base/${INSTANCE_NAME}/" >&2
    exit 1
  fi
  BACKUP_LABEL="${LIST_OUT}"
fi
S3_PREFIX="${S3_BASE_URI}/base/${INSTANCE_NAME}/${BACKUP_LABEL}"
ARCHIVE_NAME="base-${BACKUP_LABEL}.tar.gz"
ARCHIVE_PATH="${WORK_DIR}/${ARCHIVE_NAME}"
SHA256_PATH="${ARCHIVE_PATH}.sha256"
echo "[INFO] Selected backup: ${S3_PREFIX}/${ARCHIVE_NAME}"
# Download archive and sha256
aws s3 cp --only-show-errors "${S3_PREFIX}/${ARCHIVE_NAME}" "${ARCHIVE_PATH}"
aws s3 cp --only-show-errors "${S3_PREFIX}/${ARCHIVE_NAME}.sha256" "${SHA256_PATH}"
# Verify checksum
pushd "$(dirname "${ARCHIVE_PATH}")" >/dev/null
sha256sum -c "$(basename "${SHA256_PATH}")"
popd >/dev/null
# Stop PostgreSQL if running (best effort; adjust to your service name)
if command -v pg_ctl >/dev/null 2>&1; then
  echo "[INFO] Attempting to stop PostgreSQL (best effort)"
  pg_ctl -D "${PGDATA}" -m fast stop || true
fi
# Alternatively: systemctl stop postgresql (customize per distro)
# Prepare PGDATA
if [[ -d "${PGDATA}" ]]; then
  echo "[INFO] Moving existing PGDATA to ${PGDATA}.bak.$(date -u +%Y%m%dT%H%M%SZ)"
  mv "${PGDATA}" "${PGDATA}.bak.$(date -u +%Y%m%dT%H%M%SZ)"
fi
mkdir -p "${PGDATA}"
# Extract archive
echo "[INFO] Extracting backup archive to PGDATA"
tar -C "${PGDATA}" --strip-components=1 -xzf "${ARCHIVE_PATH}"
# Ensure permissions
chmod 700 "${PGDATA}"
chown -R "${PGUSER_SYS}:${PGUSER_SYS}" "${PGDATA}"
# Configure restore_command and recovery target in postgresql.auto.conf
AUTO_CONF="${PGDATA}/postgresql.auto.conf"
echo "[INFO] Configuring restore_command and recovery targets in ${AUTO_CONF}"
# Remove any old restore/recovery lines
sed -i '/^restore_command\s*=.*/d;/^recovery_target.*/d;/^recovery_target_action.*/d' "${AUTO_CONF}" 2>/dev/null || true
# Ensure the wal_fetch helper exists (needed for PITR)
if [[ ! -x /usr/local/bin/wal_fetch.sh ]]; then
  echo "[WARN] /usr/local/bin/wal_fetch.sh not found or not executable; install it before attempting PITR." >&2
  echo "[WARN] Proceeding anyway: restore_command will still be written, but WAL fetches will fail until the helper is installed."
fi
# Write new settings
{
  echo "restore_command = '/usr/local/bin/wal_fetch.sh \"${WAL_S3_PREFIX}\" \"%f\" \"%p\"'"
  if [[ "${RECOVERY_TARGET}" == "immediate" ]]; then
    echo "recovery_target = 'immediate'"
  else
    echo "recovery_target_time = '${RECOVERY_TARGET}'"
  fi
  echo "recovery_target_action = 'promote'"
} >> "${AUTO_CONF}"
# Create recovery.signal for archive recovery
touch "${PGDATA}/recovery.signal"
echo "[INFO] Starting PostgreSQL for recovery"
if command -v pg_ctl >/dev/null 2>&1; then
  pg_ctl -D "${PGDATA}" -l "${PGDATA}/pg_recovery.log" start
else
  echo "[WARN] pg_ctl not found; start PostgreSQL with your service manager (systemctl start postgresql)"
fi
echo "[INFO] PostgreSQL started. Monitor ${PGDATA}/pg_recovery.log to see recovery progress."
echo "[NOTE] After recovery completes and instance promotes, recovery.signal is removed automatically."
echo "[DONE] Restore procedure finished (service running in recovery -> normal)."
#!/usr/bin/env bash
# Fetch WAL from S3 (used by restore_command during recovery)
# Usage: wal_fetch.sh <S3_WAL_PREFIX> <WAL_NAME> <DEST_PATH>
set -Eeuo pipefail
S3_WAL_PREFIX="$1"
WAL_NAME="$2"
DEST_PATH="$3"
if [[ -z "${S3_WAL_PREFIX}" || -z "${WAL_NAME}" || -z "${DEST_PATH}" ]]; then
  echo "[wal_fetch] Missing args" >&2
  exit 1
fi
TMP_DEST="${DEST_PATH}.part"
aws s3 cp --only-show-errors "${S3_WAL_PREFIX}/${WAL_NAME}" "${TMP_DEST}"
mv -f "${TMP_DEST}" "${DEST_PATH}"
exit 0
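A usage sketch for the restore script (the file name pg_restore_from_s3.sh is hypothetical; the timestamp format follows the recovery-target example in the script's configuration):
# Restore the latest weekly base backup and stop as soon as consistency is reached
sudo -u postgres RECOVERY_TARGET="immediate" /usr/local/bin/pg_restore_from_s3.sh
# PITR to a specific moment (requires WAL archiving to have been enabled)
sudo -u postgres RECOVERY_TARGET="2025-11-02 13:15:00+00" /usr/local/bin/pg_restore_from_s3.sh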
Adjust the following parameters to your environment (all are defined at the top of each script):
Troubleshooting topics:
- Authentication failures (pg_basebackup errors or failed connections)
- Permission and dependency issues
- S3 access errors
- Backup verification failures
- WAL archiving failures (PITR-related)
- Restore failures
- Performance and the backup window
To extend this into a daily-incremental plus weekly-full strategy, the existing WAL archiving already provides fine-grained recovery points; the core scripts need no changes, only the schedule and retention policy. If you share your PostgreSQL and OS versions, the script parameters and commands (systemd service names, directory layout) can be tailored further.
This turns database backup and recovery — a high-risk task that normally depends on experts — into a reusable, deployable, auditable, standardized pipeline. Provide three key inputs (database type, backup frequency, storage location) to generate a backup and restore script solution that fits your business scenario:
Quickly build a backup matrix across multiple database types, generate full/incremental and restore-drill scripts, unify logging and permissions, and shrink maintenance windows.
Stand up a standardized backup regime with automated daily/weekly/monthly execution and archiving; when failures occur, follow the runbook to recover and roll back quickly, reducing recovery time.
Deploy a secure, auditable backup solution without a dedicated DBA, with cloud-storage support and cost optimization, ready for launches and compliance reviews.
Copy the prompt generated from the template into your preferred chat application (ChatGPT, Claude, etc.) and use it directly in conversation, with no extra development. Suited to quick personal trials and lightweight use.
Turn the prompt template into an API: your programs can adjust template parameters and call it through the interface, enabling automation and batch processing. Suited to developer integration and embedding into business systems.
Configure the corresponding server address in an MCP client so your AI applications can invoke the prompt template automatically. Suited to advanced users and team collaboration, letting prompts move seamlessly between AI tools.