Files
HanaToolbox/Services/BackupService.cs
2026-03-02 20:53:28 +01:00

233 lines
9.0 KiB
C#

using HanaToolbox.Config;
using HanaToolbox.Logging;
using HanaToolbox.Services.Interfaces;
namespace HanaToolbox.Services;
/// <summary>
/// Handles tenant backups and schema exports/imports.
/// All hdbsql and file-creation operations run as &lt;sid&gt;adm via IUserSwitcher.
/// Compression (tar/pigz) runs as &lt;sid&gt;adm so the archive is owned by the HANA user.
/// </summary>
public sealed class BackupService(
    IUserSwitcher switcher,
    IHdbClientLocator locator,
    INotificationService ntfy,
    AppLogger logger) : IBackupService
{
    // Single timestamp format shared by schema-export and tenant-backup artifact names.
    private const string TimestampFormat = "yyyyMMdd_HHmmss";

    /// <summary>
    /// Entry point: runs schema exports, a tenant backup, or both, depending on
    /// <see cref="BackupConfig"/>.Type.
    /// </summary>
    /// <param name="config">Backup settings (type, paths, user keys, compression, threads).</param>
    /// <param name="hana">Connection settings used to locate the hdbsql binary.</param>
    /// <param name="sid">SAP system id; all shell work runs as &lt;sid&gt;adm.</param>
    /// <param name="ct">Cancellation token flowed through every child operation.</param>
    public async Task RunAsync(
        BackupConfig config, HanaConfig hana, string sid,
        CancellationToken ct = default)
    {
        var hdbsql = locator.LocateHdbsql(hana.HdbsqlPath, sid, hana.InstanceNumber);
        var threads = ResolveThreads(config.Threads);
        switch (config.Type)
        {
            case BackupType.Schema:
                await RunSchemaExportsAsync(config, hdbsql, sid, threads, ct);
                break;
            case BackupType.Tenant:
                await RunTenantBackupAsync(config, hdbsql, sid, ct);
                break;
            case BackupType.All:
                await RunSchemaExportsAsync(config, hdbsql, sid, threads, ct);
                await RunTenantBackupAsync(config, hdbsql, sid, ct);
                break;
        }
    }

    // ── Public helpers used by ExportCommand / ImportCommand ─────────────────

    /// <summary>
    /// Exports all objects of <paramref name="schema"/> as a binary export into
    /// <paramref name="targetPath"/>; when <paramref name="compress"/> is set the
    /// export lands in a timestamped temp directory that is tar/gzipped into
    /// <c>{schema}_{timestamp}.tar.gz</c> and then removed.
    /// Failures are logged and notified; the method does not throw for them.
    /// </summary>
    public async Task ExportSchemaAsync(
        string hdbsql, string userKey, string schema, string targetPath,
        int threads, bool compress, string sid, CancellationToken ct)
    {
        logger.Step($"Exporting schema '{schema}' to '{targetPath}'...");
        // Created as the current process user; the per-export subdirectory below
        // is created as <sid>adm so the HANA user owns what it writes into.
        Directory.CreateDirectory(targetPath);
        string exportDir = targetPath;
        string? archivePath = null;
        if (compress)
        {
            // Compute the timestamp once so the temp export directory and the
            // archive always share the same suffix, even across a second boundary
            // (the original evaluated DateTime.Now twice and could diverge).
            var ts = DateTime.Now.ToString(TimestampFormat);
            exportDir = Path.Combine(targetPath, $"export_{schema}_{ts}");
            archivePath = Path.Combine(targetPath, $"{schema}_{ts}.tar.gz");
            await RunAs(sid, $"mkdir -p \"{exportDir}\"", ct);
        }
        // NOTE(review): schema/exportDir are interpolated into the statement; both
        // originate from trusted config/CLI input, not end-user data — keep it that way.
        var sql = $"EXPORT \"{schema}\".\"*\" AS BINARY INTO '{exportDir}' WITH REPLACE THREADS {threads} NO DEPENDENCIES;";
        var result = await RunHdbsqlAsync(hdbsql, userKey, sql, sid, ct);
        if (!result.Success)
        {
            logger.Error($"Schema export failed for '{schema}': {ErrorText(result)}");
            await ntfy.SendAsync("HANA Export Failed", $"Export of schema '{schema}' FAILED.", ct);
            return;
        }
        if (compress && archivePath != null)
            await CompressAsync(exportDir, archivePath, sid, threads, ct);
        logger.Success($"Schema export of '{schema}' complete.");
        await ntfy.SendAsync("HANA Export", $"Export of schema '{schema}' completed successfully.", ct);
    }

    /// <summary>
    /// Imports a previously exported schema from <paramref name="sourcePath"/> —
    /// a directory, or a tar.gz archive when <paramref name="compress"/> is set —
    /// optionally renaming it to <paramref name="newSchema"/>.
    /// The decompression scratch directory is always cleaned up.
    /// </summary>
    public async Task ImportSchemaAsync(
        string hdbsql, string userKey, string schema, string sourcePath,
        int threads, bool compress, bool replace, string? newSchema, string sid, CancellationToken ct)
    {
        logger.Step($"Importing schema '{schema}'{(newSchema != null ? $" as '{newSchema}'" : "")}...");
        string importDir = sourcePath;
        string? tmpDir = null;
        if (compress)
        {
            tmpDir = Path.Combine("/tmp", $"import_{schema}_{Path.GetRandomFileName()}");
            await RunAs(sid, $"mkdir -p \"{tmpDir}\"", ct);
            var decompResult = await RunAs(sid, $"tar -xzf \"{sourcePath}\" -C \"{tmpDir}\" --strip-components=1", ct);
            if (!decompResult.Success)
            {
                logger.Error($"Decompression failed: {ErrorText(decompResult)}");
                // Remove the (possibly partially filled) scratch dir before bailing
                // out — the original leaked it on this path.
                await RunAs(sid, $"rm -rf \"{tmpDir}\"", ct);
                return;
            }
            importDir = tmpDir;
        }
        try
        {
            var mode = replace ? "REPLACE" : "IGNORE EXISTING";
            var rename = newSchema != null ? $" RENAME SCHEMA \"{schema}\" TO \"{newSchema}\"" : string.Empty;
            var sql = $"IMPORT \"{schema}\".\"*\" AS BINARY FROM '{importDir}' WITH {mode}{rename} THREADS {threads};";
            var result = await RunHdbsqlAsync(hdbsql, userKey, sql, sid, ct);
            var target = newSchema ?? schema;
            if (!result.Success)
            {
                logger.Error($"Import failed: {ErrorText(result)}");
                await ntfy.SendAsync("HANA Import Failed", $"Import of '{schema}' to '{target}' FAILED.", ct);
            }
            else
            {
                logger.Success("Import complete.");
                await ntfy.SendAsync("HANA Import", $"Import of '{schema}' to '{target}' completed.", ct);
            }
        }
        finally
        {
            // Clean up the scratch dir even if RunHdbsqlAsync throws (e.g. cancellation).
            if (tmpDir != null)
                await RunAs(sid, $"rm -rf \"{tmpDir}\"", ct);
        }
    }

    // ── Private helpers ───────────────────────────────────────────────────────

    /// <summary>Exports every schema listed in the config, sequentially.</summary>
    private async Task RunSchemaExportsAsync(
        BackupConfig config, string hdbsql, string sid, int threads, CancellationToken ct)
    {
        foreach (var schema in config.SchemaNames)
            await ExportSchemaAsync(hdbsql, config.UserKey, schema,
                config.SchemaBackupPath, threads, config.CompressSchema, sid, ct);
    }

    /// <summary>
    /// Backs up the tenant DB, and additionally the system DB when configured
    /// with its own user key.
    /// </summary>
    private async Task RunTenantBackupAsync(
        BackupConfig config, string hdbsql, string sid, CancellationToken ct)
    {
        await BackupTenantAsync(hdbsql, config.UserKey, config.BackupBasePath,
            config.Compress, sid, ct);
        if (config.BackupSystemDb && !string.IsNullOrWhiteSpace(config.SystemDbUserKey))
            await BackupTenantAsync(hdbsql, config.SystemDbUserKey, config.BackupBasePath,
                config.Compress, sid, ct);
    }

    /// <summary>
    /// Runs <c>BACKUP DATA</c> into <paramref name="basePath"/> (or a timestamped
    /// subdirectory when compressing, which is then archived and removed).
    /// A failed backup removes its partial directory in the compress case.
    /// </summary>
    private async Task BackupTenantAsync(
        string hdbsql, string userKey, string basePath,
        bool compress, string sid, CancellationToken ct)
    {
        logger.Step("Starting tenant backup...");
        var ts = DateTime.Now.ToString(TimestampFormat);
        string backupDir = compress ? Path.Combine(basePath, $"backup_{ts}") : basePath;
        string? archivePath = compress ? Path.Combine(basePath, $"backup_{ts}.tar.gz") : null;
        // Both branches need the target directory; create it as <sid>adm in one place
        // (the original duplicated the mkdir in an if/else).
        await RunAs(sid, $"mkdir -p \"{backupDir}\"", ct);
        var prefix = Path.Combine(backupDir, $"backup_{ts}");
        var sql = $"BACKUP DATA USING FILE ('{prefix}');";
        var result = await RunHdbsqlAsync(hdbsql, userKey, sql, sid, ct);
        if (!result.Success)
        {
            logger.Error($"Tenant backup failed: {ErrorText(result)}");
            await ntfy.SendAsync("HANA Backup Failed", "Tenant backup FAILED.", ct);
            // Only remove the directory we created exclusively for this backup.
            if (compress) await RunAs(sid, $"rm -rf \"{backupDir}\"", ct);
            return;
        }
        if (compress && archivePath != null)
            await CompressAsync(backupDir, archivePath, sid, 0, ct);
        logger.Success("Tenant backup complete.");
        await ntfy.SendAsync("HANA Backup", "Tenant backup completed successfully.", ct);
    }

    /// <summary>
    /// Tars + gzips <paramref name="sourceDir"/> into <paramref name="archivePath"/>
    /// as &lt;sid&gt;adm, preferring pigz (parallel gzip) when it is on the PATH.
    /// On success the source directory is removed; on failure it is kept so the
    /// uncompressed data is not lost.
    /// </summary>
    /// <param name="threads">pigz worker count; 0 falls back to <see cref="ResolveThreads"/>.</param>
    private async Task CompressAsync(
        string sourceDir, string archivePath, string sid, int threads, CancellationToken ct)
    {
        logger.Step($"Compressing '{sourceDir}' → '{archivePath}'...");
        // Probe for pigz on <sid>adm's PATH; empty stdout means "not found".
        var whichPigz = await RunAs(sid, "which pigz 2>/dev/null", ct);
        var usePigz = whichPigz.Success && !string.IsNullOrWhiteSpace(whichPigz.StdOut);
        var pigzThreads = threads > 0 ? threads : ResolveThreads(0);
        var tarCmd = usePigz
            ? $"tar -I \"pigz -p {pigzThreads}\" -cf \"{archivePath}\" -C \"{sourceDir}\" ."
            : $"tar -czf \"{archivePath}\" -C \"{sourceDir}\" .";
        var result = await RunAs(sid, tarCmd, ct);
        if (!result.Success)
        {
            logger.Error($"Compression failed: {ErrorText(result)}");
            return;
        }
        await RunAs(sid, $"rm -rf \"{sourceDir}\"", ct);
        logger.Success("Compression complete.");
    }

    /// <summary>
    /// Executes a SQL statement via hdbsql as &lt;sid&gt;adm. The SQL is written to
    /// a temp file so no shell-quoting complications arise; the file is always
    /// deleted, even when the hdbsql invocation throws.
    /// </summary>
    private async Task<ProcessResult> RunHdbsqlAsync(
        string hdbsql, string userKey, string sql, string sid, CancellationToken ct)
    {
        var tmpFile = Path.Combine("/tmp", $"ht_{Guid.NewGuid():N}.sql");
        await File.WriteAllTextAsync(tmpFile, sql, ct);
        try
        {
            // chmod so <sid>adm can read the file written by the current user.
            await switcher.RunAsAsync(sid, $"chmod 644 \"{tmpFile}\" 2>/dev/null; true", ct);
            // hdbsql stderr is merged into stdout here — see ErrorText below.
            return await switcher.RunAsAsync(sid,
                $"\"{hdbsql}\" -U {userKey} -I \"{tmpFile}\" 2>&1", ct);
        }
        finally
        {
            // Original deleted outside try/finally and leaked the file on exceptions.
            File.Delete(tmpFile);
        }
    }

    /// <summary>
    /// Best error text for a process result. hdbsql runs with 2&gt;&amp;1, so its
    /// StdErr is usually empty — fall back to StdOut so failure logs are not blank.
    /// </summary>
    private static string ErrorText(ProcessResult result) =>
        string.IsNullOrWhiteSpace(result.StdErr) ? result.StdOut : result.StdErr;

    /// <summary>Runs a shell command as &lt;sid&gt;adm via the injected switcher.</summary>
    private Task<ProcessResult> RunAs(string sid, string cmd, CancellationToken ct) =>
        switcher.RunAsAsync(sid, cmd, ct);

    /// <summary>0 or negative → half the logical cores, minimum 1.</summary>
    private static int ResolveThreads(int configured) =>
        configured > 0 ? configured : Math.Max(1, Environment.ProcessorCount / 2);
}