Add support for backing up multiple schemas
@@ -36,4 +36,4 @@ COMPRESS_TENANT=true
 # --- Target Identifiers ---
 
-# The name of the schema to be exported when BACKUP_TYPE is 'schema' or 'all'.
-SCHEMA_NAME="MYSCHEMA"
+# The names of the schemas to be exported (space-separated) when BACKUP_TYPE is 'schema' or 'all'.
+SCHEMA_NAMES="MYSCHEMA"
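To back up more than one schema, the new SCHEMA_NAMES variable in backup.conf takes a space-separated list, for example (schema names here are hypothetical):

    SCHEMA_NAMES="SALES HR FINANCE"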
@@ -3,8 +3,8 @@
 # ==============================================================================
 # SAP HANA Backup Script
 #
-# Performs schema exports and/or tenant backups for a SAP HANA database.
-# Designed to be executed via a cronjob.
+# Performs schema exports for one or more schemas and/or tenant backups for a
+# SAP HANA database. Designed to be executed via a cronjob.
 # Reads all settings from the backup.conf file in the same directory.
 # ==============================================================================
 
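Since the script is meant to run from cron, an entry like the following would schedule it nightly (the install path, script name, and schedule are assumptions for illustration, not part of this commit):

    # Run the HANA backup every night at 02:00; append status output to a log.
    0 2 * * * /opt/hana-backup/backup.sh >> /var/log/hana-backup.log 2>&1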
@@ -38,41 +38,42 @@ fi
 # --- Functions ---
 
 # Performs a binary export of a specific schema.
+# Accepts the schema name as its first argument.
 perform_schema_export() {
-    echo "⬇️ Starting schema export for '${SCHEMA_NAME}'..."
+    local schema_name="$1"
+    if [[ -z "$schema_name" ]]; then
+        echo " ❌ Error: No schema name provided to perform_schema_export function."
+        return 1
+    fi
+
+    echo "⬇️ Starting schema export for '${schema_name}'..."
 
     local timestamp
     timestamp=$(date +%Y%m%d_%H%M%S)
     local export_base_dir="${BACKUP_BASE_DIR}/schema"
-    local export_path="${export_base_dir}/${SCHEMA_NAME}_${timestamp}"
+    local export_path="${export_base_dir}/${schema_name}_${timestamp}"
     local query_export_path="$export_path" # Default path for the EXPORT query
 
-    # NEW: If compression is enabled, export to a temporary directory
     if [[ "$COMPRESS_SCHEMA" == "true" ]]; then
-        export_path="${export_base_dir}/tmp/${SCHEMA_NAME}_${timestamp}"
+        export_path="${export_base_dir}/tmp/${schema_name}_${timestamp}"
         query_export_path="$export_path"
         echo " ℹ️ Compression enabled. Using temporary export path: ${export_path}"
     fi
 
-    local archive_file="${export_base_dir}/${SCHEMA_NAME}_${timestamp}.tar.gz"
+    local archive_file="${export_base_dir}/${schema_name}_${timestamp}.tar.gz"
 
-    # Create the target directory if it doesn't exist
     mkdir -p "$(dirname "$export_path")"
 
-    # Construct and execute the EXPORT query
-    local query="EXPORT \"${SCHEMA_NAME}\".\"*\" AS BINARY INTO '${query_export_path}' WITH REPLACE THREADS ${THREADS};"
+    local query="EXPORT \"${schema_name}\".\"*\" AS BINARY INTO '${query_export_path}' WITH REPLACE THREADS ${THREADS};"
 
-    # We redirect stdout and stderr to /dev/null for cleaner cron logs.
     "$HDBSQL_PATH" -U "$USER_KEY" "$query" > /dev/null 2>&1
     local exit_code=$?
 
     if [[ "$exit_code" -eq 0 ]]; then
-        echo " ✅ Successfully exported schema '${SCHEMA_NAME}'."
+        echo " ✅ Successfully exported schema '${schema_name}'."
 
-        # NEW: Conditional compression logic
         if [[ "$COMPRESS_SCHEMA" == "true" ]]; then
             echo " 🗜️ Compressing exported files..."
-            # Use -C to change directory, ensuring the archive doesn't contain the 'tmp' path
             tar -czf "$archive_file" -C "$(dirname "$export_path")" "$(basename "$export_path")"
             local tar_exit_code=$?
 
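To make the constructed query concrete: assuming a schema named SALES, THREADS=4, BACKUP_BASE_DIR=/backups, and a run at 2024-01-01 02:00:00 (all values hypothetical), the function hands hdbsql a statement like:

    EXPORT "SALES"."*" AS BINARY INTO '/backups/schema/SALES_20240101_020000' WITH REPLACE THREADS 4;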
@@ -80,7 +81,6 @@ perform_schema_export() {
                 echo " ✅ Successfully created archive '${archive_file}'."
                 echo " 🧹 Cleaning up temporary directory..."
                 rm -rf "$export_path"
-                # Clean up the tmp parent if it's empty
                 rmdir --ignore-fail-on-non-empty "$(dirname "$export_path")"
                 echo " ✨ Cleanup complete."
             else
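Because tar is invoked with -C "$(dirname "$export_path")" plus the directory's basename, the archive's top-level entry is the export directory itself rather than its tmp/ parent. Listing the archive from the hypothetical run above would show roughly:

    tar -tzf SALES_20240101_020000.tar.gz
    SALES_20240101_020000/
    SALES_20240101_020000/<exported files>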
@@ -90,10 +90,24 @@ perform_schema_export() {
             echo " ℹ️ Compression disabled. Raw export files are located at '${export_path}'."
         fi
     else
-        echo " ❌ Error: Failed to export schema '${SCHEMA_NAME}' (hdbsql exit code: ${exit_code})."
+        echo " ❌ Error: Failed to export schema '${schema_name}' (hdbsql exit code: ${exit_code})."
     fi
 }
 
+# NEW: Loops through the schemas in the config file and runs an export for each.
+run_all_schema_exports() {
+    if [[ -z "$SCHEMA_NAMES" ]]; then
+        echo " ⚠️ Warning: SCHEMA_NAMES variable is not set in config. Skipping schema export."
+        return
+    fi
+
+    echo "🔎 Found schemas to export: ${SCHEMA_NAMES}"
+    for schema in $SCHEMA_NAMES; do
+        perform_schema_export "$schema"
+        echo "--------------------------------------------------"
+    done
+}
+
 # Performs a full backup of the tenant database.
 perform_tenant_backup() {
     echo "⬇️ Starting tenant backup..."
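The loop relies on standard shell word splitting: $SCHEMA_NAMES is expanded unquoted, so each whitespace-separated token becomes one schema. With SCHEMA_NAMES="SALES HR" (hypothetical), the loop body runs twice, as if written:

    perform_schema_export "SALES"
    echo "--------------------------------------------------"
    perform_schema_export "HR"
    echo "--------------------------------------------------"

A side effect of this design is that schema names containing spaces cannot be expressed.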
@@ -104,7 +118,6 @@ perform_tenant_backup() {
     local backup_path_prefix
     local backup_target_dir
 
-    # NEW: Determine backup path based on compression setting
     if [[ "$COMPRESS_TENANT" == "true" ]]; then
         backup_target_dir="${backup_base_dir}/tmp"
         backup_path_prefix="${backup_target_dir}/backup_${timestamp}"
@@ -114,25 +127,19 @@
         backup_path_prefix="${backup_target_dir}/backup_${timestamp}"
     fi
 
-    # Create the target directory if it doesn't exist
     mkdir -p "$backup_target_dir"
 
-    # The USER_KEY must be configured to connect to the desired tenant database.
     local query="BACKUP DATA USING FILE ('${backup_path_prefix}')"
 
-    # We redirect stdout and stderr to /dev/null for cleaner cron logs.
     "$HDBSQL_PATH" -U "$USER_KEY" "$query" > /dev/null 2>&1
     local exit_code=$?
 
     if [[ "$exit_code" -eq 0 ]]; then
         echo " ✅ Successfully initiated tenant backup with prefix '${backup_path_prefix}'."
 
-        # NEW: Conditional compression logic
         if [[ "$COMPRESS_TENANT" == "true" ]]; then
             local archive_file="${backup_base_dir}/backup_${timestamp}.tar.gz"
             echo " 🗜️ Compressing backup files..."
-            # The backup creates multiple files starting with the prefix. We compress the whole temp dir.
-            # Using -C and '.' ensures we archive the contents of the directory, not the directory itself.
             tar -czf "$archive_file" -C "$backup_target_dir" .
             local tar_exit_code=$?
 
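Concretely: with COMPRESS_TENANT=true and a tenant base directory of /backups/tenant (hypothetical), the statement sent through hdbsql is:

    BACKUP DATA USING FILE ('/backups/tenant/tmp/backup_20240101_020000')

HANA writes one or more files starting with that prefix, which is why the compression step archives the entire temporary directory instead of a single file.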
@@ -154,19 +161,17 @@ perform_tenant_backup() {
 
 echo "⚙️ Starting HANA backup process..."
 
-# Ensure the base directory exists
 mkdir -p "$BACKUP_BASE_DIR"
 
 case "$BACKUP_TYPE" in
     schema)
-        perform_schema_export
+        run_all_schema_exports
         ;;
     tenant)
         perform_tenant_backup
         ;;
     all)
-        perform_schema_export
-        echo "" # Add a newline for better readability
+        run_all_schema_exports
         perform_tenant_backup
         ;;
     *)
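For reference, the dispatcher accepts three BACKUP_TYPE values in backup.conf:

    BACKUP_TYPE="schema"   # schema exports only (run_all_schema_exports)
    BACKUP_TYPE="tenant"   # tenant backup only (perform_tenant_backup)
    BACKUP_TYPE="all"      # schema exports first, then the tenant backup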