first commit
This commit is contained in:
6
.gitignore
vendored
Normal file
6
.gitignore
vendored
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
.vscode
|
||||||
|
bin
|
||||||
|
obj
|
||||||
|
|
||||||
|
# HanaToolbox
|
||||||
|
/etc/hanatoolbox
|
||||||
125
Cli/CliArgs.cs
Normal file
125
Cli/CliArgs.cs
Normal file
@@ -0,0 +1,125 @@
|
|||||||
|
namespace HanaToolbox.Cli;
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Parsed result from the CLI argument array.
|
||||||
|
/// Lightweight, AOT-safe, zero external dependencies.
|
||||||
|
/// </summary>
|
||||||
|
public sealed class CliArgs
|
||||||
|
{
|
||||||
|
public string Command { get; init; } = string.Empty; // e.g. "backup", "cron"
|
||||||
|
public string SubCommand { get; init; } = string.Empty; // e.g. "setup" for "cron setup"
|
||||||
|
public List<string> Positional { get; init; } = []; // non-flag arguments
|
||||||
|
|
||||||
|
public bool Verbose { get; init; }
|
||||||
|
public string Sid { get; init; } = string.Empty;
|
||||||
|
public bool Compress { get; init; }
|
||||||
|
public int Threads { get; init; }
|
||||||
|
public bool Replace { get; init; }
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Parses raw args into a CliArgs instance.
|
||||||
|
///
|
||||||
|
/// Global options: -v | --verbose
|
||||||
|
/// --sid <SID>
|
||||||
|
///
|
||||||
|
/// Command-local options are also parsed here for simplicity (they are
|
||||||
|
/// silently ignored when not applicable to the active command).
|
||||||
|
///
|
||||||
|
/// Usage examples:
|
||||||
|
/// hanatoolbox backup --verbose --sid NDB
|
||||||
|
/// hanatoolbox export MYSCHEMA /hana/backup -c -t 4
|
||||||
|
/// hanatoolbox import-rename SRC DST /path --replace
|
||||||
|
/// hanatoolbox cron setup
|
||||||
|
/// </summary>
|
||||||
|
public static CliArgs Parse(string[] args)
|
||||||
|
{
|
||||||
|
var positional = new List<string>();
|
||||||
|
var verbose = false;
|
||||||
|
var sid = string.Empty;
|
||||||
|
var compress = false;
|
||||||
|
var threads = 0;
|
||||||
|
var replace = false;
|
||||||
|
|
||||||
|
var command = string.Empty;
|
||||||
|
var subCommand = string.Empty;
|
||||||
|
|
||||||
|
int i = 0;
|
||||||
|
|
||||||
|
// First token that is not a flag is the command
|
||||||
|
// second non-flag token after that may be the subcommand (for "cron setup")
|
||||||
|
while (i < args.Length)
|
||||||
|
{
|
||||||
|
var a = args[i];
|
||||||
|
|
||||||
|
switch (a)
|
||||||
|
{
|
||||||
|
case "-v" or "--verbose":
|
||||||
|
verbose = true;
|
||||||
|
i++;
|
||||||
|
break;
|
||||||
|
|
||||||
|
case "--sid" when i + 1 < args.Length:
|
||||||
|
sid = args[++i];
|
||||||
|
i++;
|
||||||
|
break;
|
||||||
|
|
||||||
|
case "-c" or "--compress":
|
||||||
|
compress = true;
|
||||||
|
i++;
|
||||||
|
break;
|
||||||
|
|
||||||
|
case "--replace":
|
||||||
|
replace = true;
|
||||||
|
i++;
|
||||||
|
break;
|
||||||
|
|
||||||
|
case "-t" or "--threads" when i + 1 < args.Length:
|
||||||
|
int.TryParse(args[++i], out threads);
|
||||||
|
i++;
|
||||||
|
break;
|
||||||
|
|
||||||
|
default:
|
||||||
|
if (!a.StartsWith('-'))
|
||||||
|
{
|
||||||
|
if (string.IsNullOrEmpty(command))
|
||||||
|
command = a;
|
||||||
|
else if (string.IsNullOrEmpty(subCommand) && positional.Count == 0)
|
||||||
|
// Peek: is this a known subcommand for the current command?
|
||||||
|
subCommand = MaybeSubCommand(command, a) ? a : AddPositional(positional, a);
|
||||||
|
else
|
||||||
|
positional.Add(a);
|
||||||
|
}
|
||||||
|
// Unknown flag — silently skip
|
||||||
|
i++;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return new CliArgs
|
||||||
|
{
|
||||||
|
Command = command,
|
||||||
|
SubCommand = subCommand,
|
||||||
|
Positional = positional,
|
||||||
|
Verbose = verbose,
|
||||||
|
Sid = sid,
|
||||||
|
Compress = compress,
|
||||||
|
Threads = threads,
|
||||||
|
Replace = replace,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── Helpers ───────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
private static bool MaybeSubCommand(string command, string token) =>
|
||||||
|
command == "cron" && token == "setup";
|
||||||
|
|
||||||
|
private static string AddPositional(List<string> list, string value)
|
||||||
|
{
|
||||||
|
list.Add(value);
|
||||||
|
return string.Empty;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// <summary>Gets positional arg at index, or empty string if not present.</summary>
|
||||||
|
public string Pos(int index) =>
|
||||||
|
index < Positional.Count ? Positional[index] : string.Empty;
|
||||||
|
}
|
||||||
24
Commands/AuroraCommand.cs
Normal file
24
Commands/AuroraCommand.cs
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
using HanaToolbox.Cli;
|
||||||
|
using HanaToolbox.Config;
|
||||||
|
using HanaToolbox.Logging;
|
||||||
|
using HanaToolbox.Services;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Commands;
|
||||||
|
|
||||||
|
public static class AuroraCommand
|
||||||
|
{
|
||||||
|
public static async Task<int> RunAsync(CliArgs cli)
|
||||||
|
{
|
||||||
|
var log = new AppLogger(cli.Verbose);
|
||||||
|
try
|
||||||
|
{
|
||||||
|
var config = ConfigService.Load();
|
||||||
|
var sid = string.IsNullOrWhiteSpace(cli.Sid) ? config.Hana.Sid : cli.Sid;
|
||||||
|
log.Step($"Starting Aurora refresh (SID: {sid})...");
|
||||||
|
await ServiceFactory.CreateAuroraService(log)
|
||||||
|
.RunAsync(config.Aurora, config.Hana, sid);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
catch (Exception ex) { log.Error(ex.Message); return 1; }
|
||||||
|
}
|
||||||
|
}
|
||||||
24
Commands/BackupCommand.cs
Normal file
24
Commands/BackupCommand.cs
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
using HanaToolbox.Cli;
|
||||||
|
using HanaToolbox.Config;
|
||||||
|
using HanaToolbox.Logging;
|
||||||
|
using HanaToolbox.Services;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Commands;
|
||||||
|
|
||||||
|
public static class BackupCommand
|
||||||
|
{
|
||||||
|
public static async Task<int> RunAsync(CliArgs cli)
|
||||||
|
{
|
||||||
|
var log = new AppLogger(cli.Verbose);
|
||||||
|
try
|
||||||
|
{
|
||||||
|
var config = ConfigService.Load();
|
||||||
|
var sid = string.IsNullOrWhiteSpace(cli.Sid) ? config.Hana.Sid : cli.Sid;
|
||||||
|
log.Step($"Starting backup (SID: {sid})...");
|
||||||
|
await ServiceFactory.CreateBackupService(log)
|
||||||
|
.RunAsync(config.Backup, config.Hana, sid);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
catch (Exception ex) { log.Error(ex.Message); return 1; }
|
||||||
|
}
|
||||||
|
}
|
||||||
22
Commands/CleanCommand.cs
Normal file
22
Commands/CleanCommand.cs
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
using HanaToolbox.Cli;
|
||||||
|
using HanaToolbox.Config;
|
||||||
|
using HanaToolbox.Logging;
|
||||||
|
using HanaToolbox.Services;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Commands;
|
||||||
|
|
||||||
|
public static class CleanCommand
|
||||||
|
{
|
||||||
|
public static async Task<int> RunAsync(CliArgs cli)
|
||||||
|
{
|
||||||
|
var log = new AppLogger(cli.Verbose);
|
||||||
|
try
|
||||||
|
{
|
||||||
|
var config = ConfigService.Load();
|
||||||
|
log.Step("Starting cleanup...");
|
||||||
|
await new CleanerService(log).RunAsync(config.Cleaner);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
catch (Exception ex) { log.Error(ex.Message); return 1; }
|
||||||
|
}
|
||||||
|
}
|
||||||
40
Commands/CronCommand.cs
Normal file
40
Commands/CronCommand.cs
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
using HanaToolbox.Cli;
|
||||||
|
using HanaToolbox.Config;
|
||||||
|
using HanaToolbox.Logging;
|
||||||
|
using HanaToolbox.Services;
|
||||||
|
using HanaToolbox.Tui;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Commands;
|
||||||
|
|
||||||
|
public static class CronCommand
|
||||||
|
{
|
||||||
|
public static async Task<int> RunAsync(CliArgs cli)
|
||||||
|
{
|
||||||
|
var log = new AppLogger(cli.Verbose);
|
||||||
|
|
||||||
|
// cron setup → TUI
|
||||||
|
if (cli.SubCommand == "setup")
|
||||||
|
{
|
||||||
|
try
|
||||||
|
{
|
||||||
|
var config = ConfigService.Load();
|
||||||
|
var tui = new CronSetupTui();
|
||||||
|
config = tui.Run(config);
|
||||||
|
ConfigService.Save(config);
|
||||||
|
log.Always("Cron settings saved to /etc/hanatoolbox/hanatoolbox.json");
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
catch (Exception ex) { log.Error(ex.Message); return 1; }
|
||||||
|
}
|
||||||
|
|
||||||
|
// cron → orchestrator (called by system cron every minute)
|
||||||
|
try
|
||||||
|
{
|
||||||
|
var config = ConfigService.Load();
|
||||||
|
var sid = string.IsNullOrWhiteSpace(cli.Sid) ? config.Hana.Sid : cli.Sid;
|
||||||
|
await ServiceFactory.CreateCronOrchestrator(log).RunAsync(config, sid);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
catch (Exception ex) { log.Error(ex.Message); return 1; }
|
||||||
|
}
|
||||||
|
}
|
||||||
35
Commands/ExportCommand.cs
Normal file
35
Commands/ExportCommand.cs
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
using HanaToolbox.Cli;
|
||||||
|
using HanaToolbox.Config;
|
||||||
|
using HanaToolbox.Logging;
|
||||||
|
using HanaToolbox.Services;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Commands;
|
||||||
|
|
||||||
|
public static class ExportCommand
|
||||||
|
{
|
||||||
|
public static async Task<int> RunAsync(CliArgs cli)
|
||||||
|
{
|
||||||
|
var log = new AppLogger(cli.Verbose);
|
||||||
|
var schema = cli.Pos(0);
|
||||||
|
var path = cli.Pos(1);
|
||||||
|
|
||||||
|
if (string.IsNullOrWhiteSpace(schema) || string.IsNullOrWhiteSpace(path))
|
||||||
|
{
|
||||||
|
Console.Error.WriteLine("Usage: hanatoolbox export <schema> <path> [-c] [-t N]");
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
try
|
||||||
|
{
|
||||||
|
var config = ConfigService.Load();
|
||||||
|
var sid = string.IsNullOrWhiteSpace(cli.Sid) ? config.Hana.Sid : cli.Sid;
|
||||||
|
var svc = ServiceFactory.CreateBackupService(log);
|
||||||
|
var locator = ServiceFactory.CreateLocator(log);
|
||||||
|
var hdbsql = locator.LocateHdbsql(config.Hana.HdbsqlPath, sid, config.Hana.InstanceNumber);
|
||||||
|
var threads = cli.Threads > 0 ? cli.Threads : Math.Max(1, Environment.ProcessorCount / 2);
|
||||||
|
await svc.ExportSchemaAsync(hdbsql, config.Backup.UserKey, schema, path, threads, cli.Compress, sid, CancellationToken.None);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
catch (Exception ex) { log.Error(ex.Message); return 1; }
|
||||||
|
}
|
||||||
|
}
|
||||||
30
Commands/FirewallCommand.cs
Normal file
30
Commands/FirewallCommand.cs
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
using HanaToolbox.Cli;
|
||||||
|
using HanaToolbox.Config;
|
||||||
|
using HanaToolbox.Logging;
|
||||||
|
using HanaToolbox.Services;
|
||||||
|
using HanaToolbox.Tui;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Commands;
|
||||||
|
|
||||||
|
public static class FirewallCommand
|
||||||
|
{
|
||||||
|
public static async Task<int> RunAsync(CliArgs cli)
|
||||||
|
{
|
||||||
|
var log = new AppLogger(cli.Verbose);
|
||||||
|
try
|
||||||
|
{
|
||||||
|
var config = ConfigService.Load();
|
||||||
|
var runner = ServiceFactory.CreateRunner(log);
|
||||||
|
var fwService = new FirewallService(runner, log);
|
||||||
|
var tui = new FirewallTui(fwService, log);
|
||||||
|
var updated = await tui.RunAsync(config.Firewall);
|
||||||
|
if (updated != null)
|
||||||
|
{
|
||||||
|
config.Firewall = updated;
|
||||||
|
ConfigService.Save(config);
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
catch (Exception ex) { log.Error(ex.Message); return 1; }
|
||||||
|
}
|
||||||
|
}
|
||||||
36
Commands/ImportCommand.cs
Normal file
36
Commands/ImportCommand.cs
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
using HanaToolbox.Cli;
|
||||||
|
using HanaToolbox.Config;
|
||||||
|
using HanaToolbox.Logging;
|
||||||
|
using HanaToolbox.Services;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Commands;
|
||||||
|
|
||||||
|
public static class ImportCommand
|
||||||
|
{
|
||||||
|
public static async Task<int> RunAsync(CliArgs cli)
|
||||||
|
{
|
||||||
|
var log = new AppLogger(cli.Verbose);
|
||||||
|
var schema = cli.Pos(0);
|
||||||
|
var path = cli.Pos(1);
|
||||||
|
|
||||||
|
if (string.IsNullOrWhiteSpace(schema) || string.IsNullOrWhiteSpace(path))
|
||||||
|
{
|
||||||
|
Console.Error.WriteLine("Usage: hanatoolbox import <schema> <path> [-c] [-t N] [--replace]");
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
try
|
||||||
|
{
|
||||||
|
var config = ConfigService.Load();
|
||||||
|
var sid = string.IsNullOrWhiteSpace(cli.Sid) ? config.Hana.Sid : cli.Sid;
|
||||||
|
var svc = ServiceFactory.CreateBackupService(log);
|
||||||
|
var locator = ServiceFactory.CreateLocator(log);
|
||||||
|
var hdbsql = locator.LocateHdbsql(config.Hana.HdbsqlPath, sid, config.Hana.InstanceNumber);
|
||||||
|
var threads = cli.Threads > 0 ? cli.Threads : Math.Max(1, Environment.ProcessorCount / 2);
|
||||||
|
await svc.ImportSchemaAsync(hdbsql, config.Backup.UserKey, schema, path,
|
||||||
|
threads, cli.Compress, cli.Replace, newSchema: null, sid, CancellationToken.None);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
catch (Exception ex) { log.Error(ex.Message); return 1; }
|
||||||
|
}
|
||||||
|
}
|
||||||
39
Commands/ImportRenameCommand.cs
Normal file
39
Commands/ImportRenameCommand.cs
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
using HanaToolbox.Cli;
|
||||||
|
using HanaToolbox.Config;
|
||||||
|
using HanaToolbox.Logging;
|
||||||
|
using HanaToolbox.Services;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Commands;
|
||||||
|
|
||||||
|
public static class ImportRenameCommand
|
||||||
|
{
|
||||||
|
public static async Task<int> RunAsync(CliArgs cli)
|
||||||
|
{
|
||||||
|
var log = new AppLogger(cli.Verbose);
|
||||||
|
var schema = cli.Pos(0);
|
||||||
|
var newName = cli.Pos(1);
|
||||||
|
var path = cli.Pos(2);
|
||||||
|
|
||||||
|
if (string.IsNullOrWhiteSpace(schema) || string.IsNullOrWhiteSpace(newName)
|
||||||
|
|| string.IsNullOrWhiteSpace(path))
|
||||||
|
{
|
||||||
|
Console.Error.WriteLine(
|
||||||
|
"Usage: hanatoolbox import-rename <schema> <new-schema> <path> [-c] [-t N] [--replace]");
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
try
|
||||||
|
{
|
||||||
|
var config = ConfigService.Load();
|
||||||
|
var sid = string.IsNullOrWhiteSpace(cli.Sid) ? config.Hana.Sid : cli.Sid;
|
||||||
|
var svc = ServiceFactory.CreateBackupService(log);
|
||||||
|
var locator = ServiceFactory.CreateLocator(log);
|
||||||
|
var hdbsql = locator.LocateHdbsql(config.Hana.HdbsqlPath, sid, config.Hana.InstanceNumber);
|
||||||
|
var threads = cli.Threads > 0 ? cli.Threads : Math.Max(1, Environment.ProcessorCount / 2);
|
||||||
|
await svc.ImportSchemaAsync(hdbsql, config.Backup.UserKey, schema, path,
|
||||||
|
threads, cli.Compress, cli.Replace, newName, sid, CancellationToken.None);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
catch (Exception ex) { log.Error(ex.Message); return 1; }
|
||||||
|
}
|
||||||
|
}
|
||||||
28
Commands/KeyManagerCommand.cs
Normal file
28
Commands/KeyManagerCommand.cs
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
using HanaToolbox.Cli;
|
||||||
|
using HanaToolbox.Config;
|
||||||
|
using HanaToolbox.Logging;
|
||||||
|
using HanaToolbox.Services;
|
||||||
|
using HanaToolbox.Tui;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Commands;
|
||||||
|
|
||||||
|
public static class KeyManagerCommand
|
||||||
|
{
|
||||||
|
public static async Task<int> RunAsync(CliArgs cli)
|
||||||
|
{
|
||||||
|
var log = new AppLogger(cli.Verbose);
|
||||||
|
try
|
||||||
|
{
|
||||||
|
var config = ConfigService.Load();
|
||||||
|
var sid = string.IsNullOrWhiteSpace(cli.Sid) ? config.Hana.Sid : cli.Sid;
|
||||||
|
var runner = ServiceFactory.CreateRunner(log);
|
||||||
|
var switcher = new SuUserSwitcher(runner);
|
||||||
|
var locator = new HdbClientLocator(runner);
|
||||||
|
var keySvc = new KeyManagerService(switcher, locator, log);
|
||||||
|
var tui = new KeyManagerTui(keySvc, locator, log);
|
||||||
|
await tui.RunAsync(config.Hana, sid);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
catch (Exception ex) { log.Error(ex.Message); return 1; }
|
||||||
|
}
|
||||||
|
}
|
||||||
24
Commands/MonitorCommand.cs
Normal file
24
Commands/MonitorCommand.cs
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
using HanaToolbox.Cli;
|
||||||
|
using HanaToolbox.Config;
|
||||||
|
using HanaToolbox.Logging;
|
||||||
|
using HanaToolbox.Services;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Commands;
|
||||||
|
|
||||||
|
public static class MonitorCommand
|
||||||
|
{
|
||||||
|
public static async Task<int> RunAsync(CliArgs cli)
|
||||||
|
{
|
||||||
|
var log = new AppLogger(cli.Verbose);
|
||||||
|
try
|
||||||
|
{
|
||||||
|
var config = ConfigService.Load();
|
||||||
|
var sid = string.IsNullOrWhiteSpace(cli.Sid) ? config.Hana.Sid : cli.Sid;
|
||||||
|
log.Step("Running monitor check...");
|
||||||
|
await ServiceFactory.CreateMonitorService(log)
|
||||||
|
.RunAsync(config.Monitor, config.Hana, sid);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
catch (Exception ex) { log.Error(ex.Message); return 1; }
|
||||||
|
}
|
||||||
|
}
|
||||||
30
Commands/OnboardCommand.cs
Normal file
30
Commands/OnboardCommand.cs
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
using HanaToolbox.Cli;
|
||||||
|
using HanaToolbox.Config;
|
||||||
|
using HanaToolbox.Logging;
|
||||||
|
using HanaToolbox.Services;
|
||||||
|
using HanaToolbox.Tui;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Commands;
|
||||||
|
|
||||||
|
public static class OnboardCommand
|
||||||
|
{
|
||||||
|
public static async Task<int> RunAsync(CliArgs cli)
|
||||||
|
{
|
||||||
|
var log = new AppLogger(cli.Verbose);
|
||||||
|
try
|
||||||
|
{
|
||||||
|
var runner = ServiceFactory.CreateRunner(log);
|
||||||
|
var switcher = new SuUserSwitcher(runner);
|
||||||
|
var locator = new HdbClientLocator(runner);
|
||||||
|
var keySvc = new KeyManagerService(switcher, locator, log);
|
||||||
|
var keyTui = new KeyManagerTui(keySvc, locator, log);
|
||||||
|
var fwSvc = new FirewallService(runner, log);
|
||||||
|
var fwTui = new FirewallTui(fwSvc, log);
|
||||||
|
var cronTui = new CronSetupTui();
|
||||||
|
var tui = new OnboardTui(keyTui, cronTui, fwTui, log);
|
||||||
|
await tui.RunAsync();
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
catch (Exception ex) { log.Error(ex.Message); return 1; }
|
||||||
|
}
|
||||||
|
}
|
||||||
14
Config/AppConfig.cs
Normal file
14
Config/AppConfig.cs
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
using System.Text.Json.Serialization;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Config;
|
||||||
|
|
||||||
|
public sealed class AppConfig
|
||||||
|
{
|
||||||
|
public HanaConfig Hana { get; set; } = new();
|
||||||
|
public BackupConfig Backup { get; set; } = new();
|
||||||
|
public CleanerConfig Cleaner { get; set; } = new();
|
||||||
|
public MonitorConfig Monitor { get; set; } = new();
|
||||||
|
public FirewallConfig Firewall { get; set; } = new();
|
||||||
|
public AuroraConfig Aurora { get; set; } = new();
|
||||||
|
public NtfyConfig Ntfy { get; set; } = new();
|
||||||
|
}
|
||||||
26
Config/AppConfigJsonContext.cs
Normal file
26
Config/AppConfigJsonContext.cs
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
using System.Text.Json.Serialization;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Config;
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// AOT-compatible JSON source generation context.
|
||||||
|
/// All config types serialized by the app must be listed here.
|
||||||
|
/// </summary>
|
||||||
|
[JsonSerializable(typeof(AppConfig))]
|
||||||
|
[JsonSerializable(typeof(HanaConfig))]
|
||||||
|
[JsonSerializable(typeof(BackupConfig))]
|
||||||
|
[JsonSerializable(typeof(CleanerConfig))]
|
||||||
|
[JsonSerializable(typeof(MonitorConfig))]
|
||||||
|
[JsonSerializable(typeof(FirewallConfig))]
|
||||||
|
[JsonSerializable(typeof(FirewallServiceEntry))]
|
||||||
|
[JsonSerializable(typeof(AuroraConfig))]
|
||||||
|
[JsonSerializable(typeof(NtfyConfig))]
|
||||||
|
[JsonSerializable(typeof(List<string>))]
|
||||||
|
[JsonSerializable(typeof(List<FirewallServiceEntry>))]
|
||||||
|
[JsonSourceGenerationOptions(
|
||||||
|
WriteIndented = true,
|
||||||
|
PropertyNamingPolicy = JsonKnownNamingPolicy.CamelCase,
|
||||||
|
UseStringEnumConverter = true)]
|
||||||
|
internal partial class AppConfigJsonContext : JsonSerializerContext
|
||||||
|
{
|
||||||
|
}
|
||||||
23
Config/AuroraConfig.cs
Normal file
23
Config/AuroraConfig.cs
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
namespace HanaToolbox.Config;
|
||||||
|
|
||||||
|
public sealed class AuroraConfig
|
||||||
|
{
|
||||||
|
public bool Enabled { get; set; } = false;
|
||||||
|
public int ScheduleHour { get; set; } = 5;
|
||||||
|
public int ScheduleMinute { get; set; } = 0;
|
||||||
|
|
||||||
|
/// <summary>hdbuserstore key with admin rights (DROP SCHEMA, EXPORT, IMPORT, GRANT).</summary>
|
||||||
|
public string AdminUserKey { get; set; } = "CRONKEY";
|
||||||
|
|
||||||
|
/// <summary>Source schema to export and re-import as <SourceSchema>_AURORA.</summary>
|
||||||
|
public string SourceSchema { get; set; } = string.Empty;
|
||||||
|
|
||||||
|
/// <summary>DB user that receives ALL PRIVILEGES on the Aurora schema.</summary>
|
||||||
|
public string AuroraUser { get; set; } = string.Empty;
|
||||||
|
|
||||||
|
/// <summary>Directory used for temporary export files during the Aurora refresh.</summary>
|
||||||
|
public string BackupBasePath { get; set; } = "/hana/backup/aurora";
|
||||||
|
|
||||||
|
/// <summary>Thread count for export/import. 0 = auto (nproc/2).</summary>
|
||||||
|
public int Threads { get; set; } = 0;
|
||||||
|
}
|
||||||
35
Config/BackupConfig.cs
Normal file
35
Config/BackupConfig.cs
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
namespace HanaToolbox.Config;
|
||||||
|
|
||||||
|
public enum BackupType { Tenant, Schema, All }
|
||||||
|
|
||||||
|
public sealed class BackupConfig
|
||||||
|
{
|
||||||
|
public bool Enabled { get; set; } = false;
|
||||||
|
public int ScheduleHour { get; set; } = 2;
|
||||||
|
public int ScheduleMinute { get; set; } = 0;
|
||||||
|
|
||||||
|
/// <summary>What to back up: Tenant, Schema, or All.</summary>
|
||||||
|
public BackupType Type { get; set; } = BackupType.All;
|
||||||
|
|
||||||
|
/// <summary>hdbuserstore key for the tenant DB.</summary>
|
||||||
|
public string UserKey { get; set; } = "CRONKEY";
|
||||||
|
|
||||||
|
/// <summary>Base directory for tenant backup files.</summary>
|
||||||
|
public string BackupBasePath { get; set; } = "/hana/backup/tenant";
|
||||||
|
|
||||||
|
public bool Compress { get; set; } = true;
|
||||||
|
|
||||||
|
public bool BackupSystemDb { get; set; } = false;
|
||||||
|
public string SystemDbUserKey { get; set; } = string.Empty;
|
||||||
|
|
||||||
|
/// <summary>Schema names to export when Type is Schema or All.</summary>
|
||||||
|
public List<string> SchemaNames { get; set; } = [];
|
||||||
|
|
||||||
|
/// <summary>Base directory for schema export files.</summary>
|
||||||
|
public string SchemaBackupPath { get; set; } = "/hana/backup/schema";
|
||||||
|
|
||||||
|
public bool CompressSchema { get; set; } = true;
|
||||||
|
|
||||||
|
/// <summary>Thread count for export/import. 0 = auto (nproc/2).</summary>
|
||||||
|
public int Threads { get; set; } = 0;
|
||||||
|
}
|
||||||
15
Config/CleanerConfig.cs
Normal file
15
Config/CleanerConfig.cs
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
namespace HanaToolbox.Config;
|
||||||
|
|
||||||
|
public sealed class CleanerConfig
|
||||||
|
{
|
||||||
|
public bool Enabled { get; set; } = false;
|
||||||
|
public int ScheduleHour { get; set; } = 3;
|
||||||
|
public int ScheduleMinute { get; set; } = 0;
|
||||||
|
|
||||||
|
public string TenantBackupPath { get; set; } = "/hana/backup/tenant";
|
||||||
|
public int TenantRetentionDays { get; set; } = 7;
|
||||||
|
|
||||||
|
/// <summary>One or more log backup directories. Each is cleaned with LogRetentionDays.</summary>
|
||||||
|
public List<string> LogBackupPaths { get; set; } = [];
|
||||||
|
public int LogRetentionDays { get; set; } = 1;
|
||||||
|
}
|
||||||
40
Config/ConfigService.cs
Normal file
40
Config/ConfigService.cs
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
using System.Text.Json;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Config;
|
||||||
|
|
||||||
|
public sealed class ConfigService
|
||||||
|
{
|
||||||
|
private const string ConfigDir = "/etc/hanatoolbox";
|
||||||
|
private const string ConfigFile = "/etc/hanatoolbox/hanatoolbox.json";
|
||||||
|
|
||||||
|
public static AppConfig Load()
|
||||||
|
{
|
||||||
|
if (!File.Exists(ConfigFile))
|
||||||
|
return new AppConfig();
|
||||||
|
|
||||||
|
try
|
||||||
|
{
|
||||||
|
var json = File.ReadAllText(ConfigFile);
|
||||||
|
return JsonSerializer.Deserialize(json, AppConfigJsonContext.Default.AppConfig)
|
||||||
|
?? new AppConfig();
|
||||||
|
}
|
||||||
|
catch (Exception ex)
|
||||||
|
{
|
||||||
|
throw new InvalidOperationException(
|
||||||
|
$"Failed to load config from {ConfigFile}: {ex.Message}", ex);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public static void Save(AppConfig config)
|
||||||
|
{
|
||||||
|
Directory.CreateDirectory(ConfigDir);
|
||||||
|
Directory.CreateDirectory(StateDirectory);
|
||||||
|
|
||||||
|
var json = JsonSerializer.Serialize(config, AppConfigJsonContext.Default.AppConfig);
|
||||||
|
File.WriteAllText(ConfigFile, json);
|
||||||
|
}
|
||||||
|
|
||||||
|
public static bool Exists() => File.Exists(ConfigFile);
|
||||||
|
|
||||||
|
public static string StateDirectory => "/etc/hanatoolbox/state";
|
||||||
|
}
|
||||||
36
Config/FirewallConfig.cs
Normal file
36
Config/FirewallConfig.cs
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
using System.Text.Json.Serialization;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Config;
|
||||||
|
|
||||||
|
[JsonConverter(typeof(JsonStringEnumConverter<FirewallDecision>))]
|
||||||
|
public enum FirewallDecision { Skip, All, Ip }
|
||||||
|
|
||||||
|
public sealed class FirewallServiceEntry
|
||||||
|
{
|
||||||
|
public string Name { get; set; } = string.Empty;
|
||||||
|
public List<string> Ports { get; set; } = [];
|
||||||
|
public FirewallDecision Decision { get; set; } = FirewallDecision.Skip;
|
||||||
|
public List<string> AllowedIps { get; set; } = [];
|
||||||
|
}
|
||||||
|
|
||||||
|
public sealed class FirewallConfig
|
||||||
|
{
|
||||||
|
public bool Enabled { get; set; } = false;
|
||||||
|
public int ScheduleHour { get; set; } = 4;
|
||||||
|
public int ScheduleMinute { get; set; } = 0;
|
||||||
|
|
||||||
|
/// <summary>Whether to flush all existing rules before applying. Saved in config.</summary>
|
||||||
|
public bool FlushBeforeApply { get; set; } = false;
|
||||||
|
|
||||||
|
public List<FirewallServiceEntry> Services { get; set; } =
|
||||||
|
[
|
||||||
|
new() { Name = "SAP Web Client", Ports = ["443"], Decision = FirewallDecision.Skip },
|
||||||
|
new() { Name = "SAP HANA Database (System & Company DB)", Ports = ["30013","30015"], Decision = FirewallDecision.Skip },
|
||||||
|
new() { Name = "SAP Business One SLD", Ports = ["40000"], Decision = FirewallDecision.Skip },
|
||||||
|
new() { Name = "SAP Business One Auth", Ports = ["40020"], Decision = FirewallDecision.Skip },
|
||||||
|
new() { Name = "SAP Business One Service Layer/Cockpit", Ports = ["50000","4300"], Decision = FirewallDecision.Skip },
|
||||||
|
new() { Name = "SAP Host Agent", Ports = ["1128","1129"], Decision = FirewallDecision.Skip },
|
||||||
|
new() { Name = "SSH Remote Access", Ports = ["22"], Decision = FirewallDecision.Skip },
|
||||||
|
new() { Name = "SMB / B1_SHR (File Sharing)", Ports = ["139","445"], Decision = FirewallDecision.Skip },
|
||||||
|
];
|
||||||
|
}
|
||||||
14
Config/HanaConfig.cs
Normal file
14
Config/HanaConfig.cs
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
namespace HanaToolbox.Config;
|
||||||
|
|
||||||
|
public sealed class HanaConfig
|
||||||
|
{
|
||||||
|
/// <summary>HANA System ID, e.g. NDB. Used to build the OS user <sid>adm.</summary>
|
||||||
|
public string Sid { get; set; } = "NDB";
|
||||||
|
public string InstanceNumber { get; set; } = "00";
|
||||||
|
|
||||||
|
/// <summary>Optional override for hdbsql binary path. Null = auto-detect.</summary>
|
||||||
|
public string? HdbsqlPath { get; set; }
|
||||||
|
|
||||||
|
/// <summary>Optional override for hdbuserstore binary path. Null = auto-detect.</summary>
|
||||||
|
public string? HdbuserstorePath { get; set; }
|
||||||
|
}
|
||||||
38
Config/MonitorConfig.cs
Normal file
38
Config/MonitorConfig.cs
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
namespace HanaToolbox.Config;
|
||||||
|
|
||||||
|
public sealed class MonitorConfig
|
||||||
|
{
|
||||||
|
public bool Enabled { get; set; } = false;
|
||||||
|
|
||||||
|
/// <summary>hdbuserstore key used for all monitoring SQL queries.</summary>
|
||||||
|
public string HanaUserKey { get; set; } = "CRONKEY";
|
||||||
|
|
||||||
|
public string HanaInstanceNumber { get; set; } = "00";
|
||||||
|
|
||||||
|
/// <summary>Full path to sapcontrol binary.</summary>
|
||||||
|
public string SapcontrolPath { get; set; } = "/usr/sap/NDB/HDB00/exe/sapcontrol";
|
||||||
|
|
||||||
|
/// <summary>Company name included in ntfy alert messages.</summary>
|
||||||
|
public string CompanyName { get; set; } = "MyCompany";
|
||||||
|
|
||||||
|
/// <summary>Disk usage alert threshold in percent.</summary>
|
||||||
|
public int DiskUsageThresholdPercent { get; set; } = 85;
|
||||||
|
|
||||||
|
/// <summary>Alert if truncated log segments exceed this percent of total.</summary>
|
||||||
|
public int TruncatedSegmentThresholdPercent { get; set; } = 80;
|
||||||
|
|
||||||
|
/// <summary>Alert if free log segments fall below this percent of total.</summary>
|
||||||
|
public int FreeSegmentThresholdPercent { get; set; } = 10;
|
||||||
|
|
||||||
|
/// <summary>Statement queue length above which a breach is counted.</summary>
|
||||||
|
public int StatementQueueThreshold { get; set; } = 10;
|
||||||
|
|
||||||
|
/// <summary>How many consecutive cron ticks above threshold before alerting.</summary>
|
||||||
|
public int StatementQueueConsecutiveRuns { get; set; } = 3;
|
||||||
|
|
||||||
|
/// <summary>Alert if the last successful backup is older than this many hours.</summary>
|
||||||
|
public int BackupThresholdHours { get; set; } = 26;
|
||||||
|
|
||||||
|
/// <summary>Directories checked for disk usage.</summary>
|
||||||
|
public List<string> DirectoriesToMonitor { get; set; } = ["/hana/data", "/hana/log"];
|
||||||
|
}
|
||||||
7
Config/NtfyConfig.cs
Normal file
7
Config/NtfyConfig.cs
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
namespace HanaToolbox.Config;
|
||||||
|
|
||||||
|
public sealed class NtfyConfig
|
||||||
|
{
|
||||||
|
public string Url { get; set; } = "https://ntfy.sh/your-topic";
|
||||||
|
public string Token { get; set; } = string.Empty;
|
||||||
|
}
|
||||||
20
HanaToolbox.csproj
Normal file
20
HanaToolbox.csproj
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
<Project Sdk="Microsoft.NET.Sdk">
|
||||||
|
|
||||||
|
<PropertyGroup>
|
||||||
|
<OutputType>Exe</OutputType>
|
||||||
|
<TargetFramework>net10.0</TargetFramework>
|
||||||
|
<RootNamespace>HanaToolbox</RootNamespace>
|
||||||
|
<Nullable>enable</Nullable>
|
||||||
|
<ImplicitUsings>enable</ImplicitUsings>
|
||||||
|
<AllowUnsafeBlocks>false</AllowUnsafeBlocks>
|
||||||
|
<PublishAot>true</PublishAot>
|
||||||
|
<OptimizationPreference>Speed</OptimizationPreference>
|
||||||
|
<NoWarn>$(NoWarn);CS9113</NoWarn>
|
||||||
|
|
||||||
|
</PropertyGroup>
|
||||||
|
|
||||||
|
<ItemGroup>
|
||||||
|
<PackageReference Include="Spectre.Console" Version="0.49.1" />
|
||||||
|
</ItemGroup>
|
||||||
|
|
||||||
|
</Project>
|
||||||
45
Logging/AppLogger.cs
Normal file
45
Logging/AppLogger.cs
Normal file
@@ -0,0 +1,45 @@
|
|||||||
|
using Spectre.Console;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Logging;
|
||||||
|
|
||||||
|
/// <summary>
/// Centralized console logger built on Spectre.Console markup.
/// Errors (and <see cref="Always"/>) are printed unconditionally;
/// Warning/Info/Success/Step are suppressed unless --verbose was passed.
/// </summary>
public sealed class AppLogger(bool verbose)
{
    /// <summary>True when --verbose was passed on the command line.</summary>
    public bool IsVerbose => verbose;

    /// <summary>Always printed, regardless of verbosity.</summary>
    public void Error(string message) =>
        AnsiConsole.MarkupLine($"[red]❌ {Escape(message)}[/]");

    public void Warning(string message) => VerboseLine($"[yellow]⚠️ {Escape(message)}[/]");

    public void Info(string message) => VerboseLine($"[grey]{Escape(message)}[/]");

    public void Success(string message) => VerboseLine($"[green]✅ {Escape(message)}[/]");

    public void Step(string message) => VerboseLine($"[blue]⚙️ {Escape(message)}[/]");

    /// <summary>Plain line, markup-escaped, printed regardless of verbosity.</summary>
    public void Always(string message) =>
        AnsiConsole.MarkupLine(Escape(message));

    // Gate for all verbose-only severities.
    private void VerboseLine(string markup)
    {
        if (verbose)
            AnsiConsole.MarkupLine(markup);
    }

    // Double the markup delimiters so Spectre renders '[' and ']' literally.
    private static string Escape(string s) =>
        s.Replace("[", "[[").Replace("]", "]]");
}
|
||||||
150
PROJECT.md
Normal file
150
PROJECT.md
Normal file
@@ -0,0 +1,150 @@
|
|||||||
|
# Project: HanaToolbox
|
||||||
|
|
||||||
|
The project should be written in C#, it should use .NET 10 and it should be compiled as native AOT.
|
||||||
|
Note that .NET 10 is the only SDK installed on this system, so you don't need to specify it.
|
||||||
|
|
||||||
|
I have provided the templates folder with some Bash script files. These contain some tools which help me with managing systems.
|
||||||
|
The systems I'm managing are SUSE Linux systems, which have SAP Business One and SAP HANA databases on them. The scripts are used for creating backups, cleaning up log backups, generating firewall rules, exporting schemas, importing schemas, and monitoring the system.
|
||||||
|
|
||||||
|
Your task is to create a C# application which will provide the same functionality as the Bash scripts. The application should be a console application and it should be compiled as native AOT.
|
||||||
|
|
||||||
|
The application should have the following features:
|
||||||
|
- Cron systems:
|
||||||
|
- Backup (Create tenant backups at a set time every day, backup.sh)
|
||||||
|
- Clean up log backups (Delete log and tenant backups older than a set time every day, cleanup.sh)
|
||||||
|
- Monitor (Monitor the system, monitor.sh)
|
||||||
|
- Generate firewall rules (Generate firewall rules for the system, firewall.sh)
|
||||||
|
- Aurora (Create a new schema early morning every day, so the customer has a fresh test schema, aurora.sh)
|
||||||
|
- Non-cron systems:
|
||||||
|
- Export schema (Export schema of the system, hanatools.sh)
|
||||||
|
- Import schema (Import schema of the system, hanatools.sh)
|
||||||
|
- Keymanager (Manage the hdbuserstore, keymanager.sh)
|
||||||
|
|
||||||
|
## Questions
|
||||||
|
|
||||||
|
### General Architecture
|
||||||
|
1. **Single binary or multi-command CLI?** The Bash scripts are separate files that call each other (e.g., `backup.sh` invokes `hanatool.sh`). Should the C# app be a single executable with subcommands (e.g., `hanatoolbox backup`, `hanatoolbox export`, `hanatoolbox keymanager`), similar to how `git` or `docker` work? Or should there be separate executables per tool?
|
||||||
|
|
||||||
|
Answer: Yes, I want a single executable with subcommands.
|
||||||
|
|
||||||
|
2. **Configuration format** — The Bash scripts use `.conf` files that are simple `KEY=VALUE` shell files sourced at runtime. Should the C# app use a similar flat format, or would you prefer a structured format like **JSON**, **TOML**, or **YAML** for the configuration files? Should the config file location be fixed (next to the binary) or configurable via a flag?
|
||||||
|
|
||||||
|
Answer: Structured formats like JSON would be perfect for this.
|
||||||
|
|
||||||
|
3. **Logging** — The Bash scripts print emoji-prefixed progress messages to stdout/stderr. Should the C# app replicate this exact style (colorful, emoji-rich console output), or would you prefer a more structured approach (e.g., using a logging library with severity levels like `INFO`, `WARN`, `ERROR`)? Should log output also be written to a file?
|
||||||
|
|
||||||
|
Answer: I want a verbose flag which prints out also info and warnings, but generally I only want to log errors. You can log them in the structured C# kind of way.
|
||||||
|
|
||||||
|
### Cron / Scheduling
|
||||||
|
4. **Built-in scheduler or external cron?** The scripts are designed to be called by the OS cron daemon. Should the C# app include its own built-in scheduler (running as a daemon/service), or should the compiled binary still be invoked directly by a system cron job — just replacing the Bash scripts as the executable?
|
||||||
|
|
||||||
|
Answer: The binary should be called by the system cron job with the command cron, and I also want another command where I can set up the cron job. So basically a TUI; you can use Spectre.Console for that. Keep in mind that Spectre.Console is compatible with AOT, but Spectre.Console.Cli is not, so use the System.CommandLine library for that. I also plan to run the cron job every minute, so make sure you think about that.
|
||||||
|
|
||||||
|
5. **Systemd integration** — If it runs as a daemon, should it integrate with systemd (e.g., notify readiness via `sd_notify`, handle `SIGTERM` gracefully)?
|
||||||
|
|
||||||
|
Answer: No, I don't need systemd integration.
|
||||||
|
|
||||||
|
### `hanatool` / Core Functionality
|
||||||
|
6. **HDB client path detection** — `hanatool.sh` checks two hardcoded paths (`/usr/sap/hdbclient` and `/usr/sap/NDB/HDB00/exe`). Should the C# app keep these same two fallback paths, or make them fully configurable? Are those the only two paths that ever appear in the wild across your managed systems?
|
||||||
|
|
||||||
|
Answer: The program will need to have access to HDBSQL and HDBUserStore binaries or scripts. So you can also get them with the which command, but these are generally the two paths that they are usually located in.
|
||||||
|
|
||||||
|
7. **`install.sh` scope** — `install.sh` downloads scripts from your Gitea instance (`git.technopunk.space`). Should this self-updater/installer also be ported to C#, or is it out of scope since the C# app will be distributed differently (e.g., as a single AOT binary)?
|
||||||
|
|
||||||
|
Answer: Yeah, keep the install.sh because I will grab this binary from my git instance. So the install logic should be roughly the same, but it will only get this binary file, not the thousands of scripts.
|
||||||
|
|
||||||
|
8. **ntfy.sh notifications** — `hanatool.sh` and `monitor.sh` send notifications to a hardcoded ntfy topic URL (`https://ntfy.technopunk.space/sap`). Should the ntfy topic URL and bearer token be configurable in a global config file, or per-tool config? Should the C# app support sending notifications on success, failure, or both (currently the scripts only send on both but the behaviour differs per script)?
|
||||||
|
|
||||||
|
Answer: You can hardcode the URL, but actually you can hardcode the token as well. Since it's a binary it's very difficult to get it out of.
|
||||||
|
|
||||||
|
### Firewall Tool
|
||||||
|
9. **Interactive vs. non-interactive** — `firewalld.sh` is fully interactive (menu-driven). Should the C# port also be interactive (like a TUI), or should it be a pure CLI tool where service rules are passed as arguments or defined in a config file upfront?
|
||||||
|
|
||||||
|
Answer: I want a TUI. Spectre.Console again here.
|
||||||
|
|
||||||
|
10. **Predefined services** — The firewall script has 8 hardcoded SAP-related services with fixed port numbers. Should these remain hardcoded in the C# app, or should they be loaded from a configurable list so you can add/remove services without recompiling?
|
||||||
|
|
||||||
|
Answer: I want to keep them hardcoded.
|
||||||
|
|
||||||
|
### Key Manager
|
||||||
|
11. **Interactive TUI or CLI subcommands?** — `keymanager.sh` is an interactive menu. Should the C# version also be interactive (e.g., using `Spectre.Console` prompts), or should it expose subcommands like `hanatoolbox keymanager add`, `hanatoolbox keymanager delete`, `hanatoolbox keymanager test`?
|
||||||
|
|
||||||
|
Answer: I want a TUI. Spectre.Console again.
|
||||||
|
|
||||||
|
### Monitor
|
||||||
|
12. **State persistence** — `monitor.sh` stores alert state in flat `.state` files under a `monitor_state/` directory to avoid duplicate notifications. Should the C# app use the same file-based state mechanism, or would you prefer a lightweight embedded database (e.g., SQLite)?
|
||||||
|
|
||||||
|
Answer: Whatever is more simple, if you can use an embedded database while still keeping the codebase relatively simple and easy to maintain, then you can do that, otherwise you can stick with the very simple file-based system.
|
||||||
|
|
||||||
|
### Aurora
|
||||||
|
13. **Post-import SQL scripts** — `aurora.sh` supports running arbitrary SQL files from a configurable `SQL_SCRIPTS_ROOT` directory after the import. Should this feature be preserved in the C# version?
|
||||||
|
|
||||||
|
Answer: No need for these scripts to run.
|
||||||
|
|
||||||
|
### Deployment
|
||||||
|
14. **Target user / privilege level** — The scripts invoke `firewall-cmd` and other privileged tools. Is the app expected to be run as root, or via `sudo`? Should it handle privilege escalation internally (e.g., re-exec itself with sudo if not root)?
|
||||||
|
|
||||||
|
Answer: By default I want it to run as root, however most other parts should run as the tenant user for SAP HANA. So that means if I want to use keymanager, backup, aurora, monitor, etc. you need to do a `su - <SID>adm` and then run the command. In 90% of the cases the SID is just NDB. But there are some exceptions where the SID is different, so I want to be able to specify the SID in the command line.
|
||||||
|
|
||||||
|
15. **Multi-system management** — Each system currently has its own copy of the scripts with its own `.conf` files. Should the C# app manage a single local system only, or should it be able to target remote systems (e.g., via SSH)?
|
||||||
|
|
||||||
|
Answer: I only want to do local systems, so I will install this binary into every single system manually. It's fine.
|
||||||
|
|
||||||
|
I also want basically an onboard command where I can do the initial setup for all the parameters and everything.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Follow-up Questions (Round 2)
|
||||||
|
|
||||||
|
### Cron Scheduling
|
||||||
|
16. **Schedule storage model** — Since the `cron` command runs every minute, the app needs to know which tasks are due. Should each scheduled task store its own "next run time" or "last run time" in the state/config, and the `cron` command checks whether enough time has passed since the last run? Or should the schedule be expressed as a cron expression (e.g., `"0 2 * * *"`) that the app evaluates itself?
|
||||||
|
|
||||||
|
Answer: So for backups, Aurora, and the cleaner parts, they will need to run at a set time each day. So for that, you can just check basically the system time and then make sure that it runs when it needs to run. For the monitor, the monitor needs to run often. So, basically, you shouldn't create a separate monitor; you should monitor every time the cron command runs.
|
||||||
|
|
||||||
|
17. **Which tasks are cron-driven?** — The project lists Backup, Cleanup, Monitor, Firewall, and Aurora as cron tasks. Should all five be independently schedulable (each with their own time), or is the intent that Monitor runs every minute (so it's always called on every cron tick) while the others run once a day at a configured time?
|
||||||
|
|
||||||
|
Answer: Monitor runs every minute, and the others run once a day at a configured time.
|
||||||
|
|
||||||
|
18. **`cron setup` TUI scope** — When you run `hanatoolbox cron setup`, should it let you configure the schedule and settings for *all* cron tasks in one flow, or just install/update the system crontab entry for the binary itself (i.e., add `* * * * * /path/to/hanatoolbox cron` to the user's crontab)?
|
||||||
|
|
||||||
|
Answer: The cron setup should let me configure all the schedule and settings for all the cron tasks. So I will do the system cron tab manually. But generally the cron setup command in the binary should let me configure the actual settings and schedule.
|
||||||
|
|
||||||
|
### Configuration & Onboarding
|
||||||
|
19. **Single config file or per-tool?** — The Bash scripts use separate `.conf` files per tool (`backup.conf`, `aurora.conf`, `monitor.conf`). Should the C# app use one global `hanatoolbox.json` file covering all tools, or separate JSON files per subsystem? And where should it be stored — next to the binary, in `/etc/hanatoolbox/`, or somewhere else?
|
||||||
|
|
||||||
|
Answer: I want to use one global `hanatoolbox.json` file covering all tools, and it should be stored in `/etc/hanatoolbox/`.
|
||||||
|
|
||||||
|
20. **`onboard` command scope** — Should `onboard` be a guided TUI wizard that walks through every setting for every tool in one session, or should each tool have its own `configure` subcommand (e.g., `hanatoolbox backup configure`) so you can re-configure individual tools later without redoing everything?
|
||||||
|
|
||||||
|
Answer: The onboard command should be a TUI wizard that walks through every setting for every tool. So that means that it should create the user store keys based on the key manager. It should ask for the backup locations, the schedules, everything.
|
||||||
|
|
||||||
|
### User Switching
|
||||||
|
21. **How to switch to `<SID>adm`?** — When the app (running as root) needs to execute HANA operations, should it shell out the relevant process via `su - ndbadm -c "<command>"`, or use `runuser -l ndbadm -c "<command>"`? Some operations like `hdbuserstore` are very sensitive to which user's home directory is used, so I want to make sure the switching strategy matches what the Bash scripts were doing.
|
||||||
|
|
||||||
|
Answer: I'm not sure what the difference is, but the first one works perfectly for now. So `su - ndbadm -c <command>`.
|
||||||
|
|
||||||
|
22. **SID as a global config value or per-invocation flag?** — Should the HANA SID (defaulting to `NDB`) be stored in the global config and optionally overridden with a `--sid` flag on any command, or should it only ever be set in the config file?
|
||||||
|
|
||||||
|
Answer: I want the SID to be stored in the global config and optionally overridden with a `--sid` flag on any command.
|
||||||
|
|
||||||
|
### ntfy Notifications
|
||||||
|
23. **Token as a compile-time placeholder** — Since both the URL and token will be hardcoded into the binary, should I leave them as clearly-named compile-time constants (e.g., `const string NtfyToken = "YOUR_TOKEN_HERE"`) that you fill in before building, or do you want to provide the actual token now so it can be set from the start?
|
||||||
|
|
||||||
|
Answer: Here's the token: "tk_ggis2y224ip7r5q0p6x3v2t4j1q7m"
|
||||||
|
|
||||||
|
### Cleaner / Cleanup
|
||||||
|
24. **`cleaner.sh` target paths** — `cleaner.sh` takes `<retention_days>:<path>` pairs as arguments. In the C# version (which is cron-driven), those pairs need to come from config. What paths and retention periods are typically configured? For example: tenant backup files for 7 days, log backup files for 3 days? Knowing the typical setup will help design the config schema correctly.
|
||||||
|
|
||||||
|
Answer: Tenant backup files for 7 days, log backup files for 1 day. Keep in mind that log backups can be found in multiple locations, so I want to be able to specify multiple paths.
|
||||||
|
|
||||||
|
### Firewall
|
||||||
|
25. **`firewall-cmd` only?** — The script uses `firewall-cmd` (firewalld). Can I assume all target systems use firewalld, or do some use raw `iptables`/`nftables` that would need a different backend?
|
||||||
|
|
||||||
|
Answer: I want to use `firewall-cmd` only.
|
||||||
|
|
||||||
|
### Aurora
|
||||||
|
26. **`_AURORA` suffix — always fixed?** — `aurora.sh` always names the target schema `<SOURCE_SCHEMA>_AURORA`. Should this suffix be hardcoded in the C# version too, or do you want it to be configurable per instance?
|
||||||
|
|
||||||
|
Answer: I want to use the suffix as hardcoded in the C# version.
|
||||||
|
|
||||||
|
For easy maintenance and extensibility, I also want the code to adhere to the SOLID principles as much as possible.
|
||||||
56
Program.cs
Normal file
56
Program.cs
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
using HanaToolbox.Cli;
|
||||||
|
using HanaToolbox.Commands;
|
||||||
|
|
||||||
|
var cli = CliArgs.Parse(args);

// Dispatch on the first positional token. A bare invocation or an explicit
// "help" prints usage and exits 0; anything unrecognized reports an error
// and exits 1. Each command returns the process exit code.
return cli.Command switch
{
    "backup" => await BackupCommand.RunAsync(cli),
    "export" => await ExportCommand.RunAsync(cli),
    "import" => await ImportCommand.RunAsync(cli),
    "import-rename" => await ImportRenameCommand.RunAsync(cli),
    "aurora" => await AuroraCommand.RunAsync(cli),
    "clean" => await CleanCommand.RunAsync(cli),
    "monitor" => await MonitorCommand.RunAsync(cli),
    "firewall" => await FirewallCommand.RunAsync(cli),
    "keymanager" => await KeyManagerCommand.RunAsync(cli),
    "cron" => await CronCommand.RunAsync(cli),
    "onboard" => await OnboardCommand.RunAsync(cli),
    // FIX: "hanatoolbox help" previously fell through to UnknownCommand;
    // treat it as a usage request like the bare invocation.
    "" or "help" => Help(),
    _ => UnknownCommand(cli.Command),
};
|
||||||
|
|
||||||
|
// Prints the top-level usage text and returns exit code 0.
// NOTE(review): keep the command names listed here in sync with the dispatch
// switch above and with CliArgs.Parse.
static int Help()
{
    Console.WriteLine("""
        HanaToolbox — SAP HANA & Business One management toolkit

        Usage: hanatoolbox <command> [options]

        Commands:
          backup          Run a HANA tenant/schema backup manually
          export          Export a schema: export <schema> <path> [-c] [-t N]
          import          Import a schema: import <schema> <path> [-c] [-t N] [--replace]
          import-rename   Import & rename: import-rename <src> <dst> <path> [-c] [-t N] [--replace]
          aurora          Run an Aurora schema refresh manually
          clean           Delete old backup/log files per retention policy
          monitor         Run a HANA monitoring check manually
          firewall        Open the interactive Firewall TUI
          keymanager      Open the interactive Key Manager TUI
          cron            Run scheduled tasks (called by system cron every minute)
          cron setup      Configure cron task schedules and settings (TUI)
          onboard         Run the initial setup wizard

        Global options:
          -v, --verbose   Enable verbose output
          --sid <SID>     Override HANA SID from config (default: from hanatoolbox.json)
        """);
    return 0;
}
|
||||||
|
|
||||||
|
// Reports an unrecognized command on stderr (so scripted callers can detect
// the failure) and returns exit code 1.
static int UnknownCommand(string cmd)
{
    string[] lines =
    [
        $"hanatoolbox: unknown command '{cmd}'",
        "Run 'hanatoolbox' for usage.",
    ];
    foreach (var line in lines)
        Console.Error.WriteLine(line);
    return 1;
}
|
||||||
82
Scheduling/CronOrchestrator.cs
Normal file
82
Scheduling/CronOrchestrator.cs
Normal file
@@ -0,0 +1,82 @@
|
|||||||
|
using HanaToolbox.Config;
|
||||||
|
using HanaToolbox.Logging;
|
||||||
|
using HanaToolbox.Services;
|
||||||
|
using HanaToolbox.Services.Interfaces;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Scheduling;
|
||||||
|
|
||||||
|
/// <summary>
/// Called every minute by the system cron job.
/// Always runs Monitor (when enabled).
/// Runs Backup/Cleaner/Aurora/Firewall once a day at — or on the first tick
/// after — their configured local time, using date-stamped state entries to
/// guarantee at most one attempt per task per calendar day.
/// </summary>
public sealed class CronOrchestrator(
    IMonitorService monitor,
    IBackupService backup,
    ICleanerService cleaner,
    IAuroraService aurora,
    IFirewallService firewall,
    IMonitorStateService stateService,
    AppLogger logger)
{
    /// <summary>Runs one cron tick: monitor first, then any due daily tasks.</summary>
    public async Task RunAsync(AppConfig config, string sid, CancellationToken ct = default)
    {
        // Local time on purpose: schedules are configured in the server's local time.
        var now = DateTime.Now;

        // Monitor runs on every tick; a failure is logged but never aborts the tick.
        if (config.Monitor.Enabled)
        {
            logger.Step("Running monitor check...");
            try { await monitor.RunAsync(config.Monitor, config.Hana, sid, ct); }
            catch (Exception ex) { logger.Error($"Monitor error: {ex.Message}"); }
        }

        // Once-a-day tasks
        await RunIfScheduled("cron_backup", config.Backup.Enabled,
            config.Backup.ScheduleHour, config.Backup.ScheduleMinute, now,
            async () => await backup.RunAsync(config.Backup, config.Hana, sid, ct), ct);

        await RunIfScheduled("cron_cleaner", config.Cleaner.Enabled,
            config.Cleaner.ScheduleHour, config.Cleaner.ScheduleMinute, now,
            async () => await cleaner.RunAsync(config.Cleaner, ct), ct);

        await RunIfScheduled("cron_aurora", config.Aurora.Enabled,
            config.Aurora.ScheduleHour, config.Aurora.ScheduleMinute, now,
            async () => await aurora.RunAsync(config.Aurora, config.Hana, sid, ct), ct);

        await RunIfScheduled("cron_firewall", config.Firewall.Enabled,
            config.Firewall.ScheduleHour, config.Firewall.ScheduleMinute, now,
            async () => await firewall.ApplyAsync(config.Firewall, ct), ct);
    }

    /// <summary>
    /// Runs <paramref name="work"/> at most once per calendar day, on the first
    /// cron tick at or after the configured time.
    /// </summary>
    private async Task RunIfScheduled(
        string key, bool enabled,
        int hour, int minute,
        DateTime now,
        Func<Task> work,
        CancellationToken ct)
    {
        if (!enabled) return;
        ct.ThrowIfCancellationRequested();

        // BUGFIX: the previous exact-minute check (now.Hour != hour ||
        // now.Minute != minute) silently skipped the task for the entire day
        // whenever that single cron tick was missed (reboot, load, cron
        // downtime). Fire on any tick at or after the scheduled time instead;
        // the date stamp below still limits execution to once per day.
        var scheduledMinuteOfDay = hour * 60 + minute;
        var currentMinuteOfDay = now.Hour * 60 + now.Minute;
        if (currentMinuteOfDay < scheduledMinuteOfDay) return;

        var today = now.ToString("yyyy-MM-dd");
        var lastRun = stateService.GetState(key);
        if (lastRun == today)
        {
            logger.Info($"Skipping '{key}' — already ran today ({today}).");
            return;
        }

        logger.Step($"Running scheduled task '{key}'...");
        try
        {
            await work();
        }
        catch (Exception ex)
        {
            logger.Error($"Scheduled task '{key}' failed: {ex.Message}");
        }
        finally
        {
            // Stamp on success AND failure: one attempt per day, so a failing
            // task does not retry (and re-alert) on every subsequent tick.
            stateService.SetState(key, today);
        }
    }
}
|
||||||
129
Services/AuroraService.cs
Normal file
129
Services/AuroraService.cs
Normal file
@@ -0,0 +1,129 @@
|
|||||||
|
using HanaToolbox.Config;
|
||||||
|
using HanaToolbox.Logging;
|
||||||
|
using HanaToolbox.Services.Interfaces;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Services;
|
||||||
|
|
||||||
|
/// <summary>
/// Performs the Aurora schema refresh:
/// Drop old _AURORA schema → Export source → Import-rename → Update company name → Grant privileges.
/// All SQL runs as &lt;sid&gt;adm via IUserSwitcher; failures are logged and
/// reported through ntfy rather than thrown.
/// </summary>
public sealed class AuroraService(
    IUserSwitcher switcher,
    IHdbClientLocator locator,
    INotificationService ntfy,
    AppLogger logger) : IAuroraService
{
    /// <summary>Runs the full refresh for the configured source schema.</summary>
    public async Task RunAsync(
        AuroraConfig config, HanaConfig hana, string sid,
        CancellationToken ct = default)
    {
        if (string.IsNullOrWhiteSpace(config.SourceSchema))
        {
            logger.Error("Aurora: SourceSchema is not configured.");
            return;
        }

        var hdbsql = locator.LocateHdbsql(hana.HdbsqlPath, sid, hana.InstanceNumber);
        // Default to half the cores (minimum 1) when no thread count is configured.
        var threads = config.Threads > 0 ? config.Threads : Math.Max(1, Environment.ProcessorCount / 2);
        var aurora = $"{config.SourceSchema}_AURORA";
        var tmpDir = Path.Combine(config.BackupBasePath, $"{aurora}_TEMP_{DateTime.Now:yyyyMMdd_HHmmss}");

        logger.Step($"Starting Aurora refresh: '{config.SourceSchema}' → '{aurora}'");

        // 1. Drop old Aurora schema (ignore errors if it doesn't exist)
        logger.Step($"Dropping old schema '{aurora}' (if exists)...");
        await RunSql(hdbsql, config.AdminUserKey,
            $"DROP SCHEMA \"{aurora}\" CASCADE;", sid, ct);

        // 2. Prepare temp export directory (as <sid>adm so HANA can write into it)
        await RunAs(sid, $"mkdir -p \"{tmpDir}\"", ct);

        // 3. Export source schema
        logger.Step($"Exporting '{config.SourceSchema}' to temp dir...");
        var exportResult = await RunSql(hdbsql, config.AdminUserKey,
            $"EXPORT \"{config.SourceSchema}\".\"*\" AS BINARY INTO '{tmpDir}' WITH REPLACE THREADS {threads} NO DEPENDENCIES;",
            sid, ct);

        if (!exportResult.Success)
        {
            logger.Error($"Aurora export failed: {exportResult.StdErr}");
            await ntfy.SendAsync("Aurora Failed", $"Export of '{config.SourceSchema}' FAILED.", ct);
            await Cleanup(tmpDir, sid, ct);
            return;
        }

        // 4. Import → rename
        logger.Step($"Importing as '{aurora}'...");
        var importResult = await RunSql(hdbsql, config.AdminUserKey,
            $"IMPORT \"{config.SourceSchema}\".\"*\" AS BINARY FROM '{tmpDir}' WITH REPLACE RENAME SCHEMA \"{config.SourceSchema}\" TO \"{aurora}\" THREADS {threads};",
            sid, ct);

        if (!importResult.Success)
        {
            logger.Error($"Aurora import failed: {importResult.StdErr}");
            await ntfy.SendAsync("Aurora Failed", $"Import-rename to '{aurora}' FAILED.", ct);
            await Cleanup(tmpDir, sid, ct);
            return;
        }

        // 5. Get original company name from CINF; fall back to the schema name.
        logger.Step("Fetching company name from source schema...");
        var nameResult = await RunSqlScalar(hdbsql, config.AdminUserKey,
            $"SELECT \"CompnyName\" FROM \"{config.SourceSchema}\".\"CINF\";", sid, ct);
        var companyName = string.IsNullOrWhiteSpace(nameResult) ? config.SourceSchema : nameResult;
        var newName = $"AURORA - {companyName} - {DateTime.Now:yyyy-MM-dd}";
        // BUGFIX: CompnyName comes straight from the database and may contain a
        // single quote, which previously broke (or injected into) the UPDATE
        // statements below. hdbsql offers no bind parameters on this path, so
        // escape string literals by doubling quotes per SQL convention.
        var sqlSafeName = newName.Replace("'", "''");

        // 6. Update company name in CINF and OADM
        logger.Step($"Setting company name to '{newName}'...");
        await RunSql(hdbsql, config.AdminUserKey,
            $"UPDATE \"{aurora}\".\"CINF\" SET \"CompnyName\" = '{sqlSafeName}';", sid, ct);
        await RunSql(hdbsql, config.AdminUserKey,
            $"UPDATE \"{aurora}\".\"OADM\" SET \"CompnyName\" = '{sqlSafeName}', \"PrintHeadr\" = '{sqlSafeName}';", sid, ct);

        // 7. Grant privileges (optional — skipped when no Aurora user is configured)
        if (!string.IsNullOrWhiteSpace(config.AuroraUser))
        {
            logger.Step($"Granting ALL PRIVILEGES on '{aurora}' to '{config.AuroraUser}'...");
            await RunSql(hdbsql, config.AdminUserKey,
                $"GRANT ALL PRIVILEGES ON SCHEMA \"{aurora}\" TO \"{config.AuroraUser}\";", sid, ct);
        }

        // 8. Cleanup temp directory
        await Cleanup(tmpDir, sid, ct);

        logger.Success($"Aurora refresh of '{aurora}' completed!");
        await ntfy.SendAsync("Aurora Complete", $"Aurora refresh of '{aurora}' completed successfully.", ct);
    }

    // Writes the statement to a temp file and executes it via hdbsql as <sid>adm.
    private async Task<ProcessResult> RunSql(
        string hdbsql, string userKey, string sql, string sid, CancellationToken ct)
    {
        var tmpFile = Path.Combine("/tmp", $"ht_{Guid.NewGuid():N}.sql");
        await File.WriteAllTextAsync(tmpFile, sql, ct);
        try
        {
            return await switcher.RunAsAsync(sid,
                $"\"{hdbsql}\" -U {userKey} -I \"{tmpFile}\" 2>&1", ct);
        }
        finally
        {
            // BUGFIX: previously leaked the temp SQL file when RunAsAsync threw.
            File.Delete(tmpFile);
        }
    }

    // Like RunSql but runs hdbsql in quiet/raw mode and returns the trimmed
    // scalar result (surrounding quotes stripped).
    private async Task<string> RunSqlScalar(
        string hdbsql, string userKey, string sql, string sid, CancellationToken ct)
    {
        var tmpFile = Path.Combine("/tmp", $"ht_{Guid.NewGuid():N}.sql");
        await File.WriteAllTextAsync(tmpFile, sql, ct);
        try
        {
            var result = await switcher.RunAsAsync(sid,
                $"\"{hdbsql}\" -U {userKey} -a -x -I \"{tmpFile}\" 2>&1", ct);
            return result.StdOut.Replace("\"", "").Trim();
        }
        finally
        {
            // BUGFIX: previously leaked the temp SQL file when RunAsAsync threw.
            File.Delete(tmpFile);
        }
    }

    // Shorthand for running an arbitrary shell command as <sid>adm.
    private Task<ProcessResult> RunAs(string sid, string cmd, CancellationToken ct) =>
        switcher.RunAsAsync(sid, cmd, ct);

    // Best-effort removal of the temp export directory, as <sid>adm.
    private async Task Cleanup(string dir, string sid, CancellationToken ct)
    {
        logger.Step($"Cleaning up temp dir '{dir}'...");
        await RunAs(sid, $"rm -rf \"{dir}\"", ct);
    }
}
|
||||||
232
Services/BackupService.cs
Normal file
232
Services/BackupService.cs
Normal file
@@ -0,0 +1,232 @@
|
|||||||
|
using HanaToolbox.Config;
|
||||||
|
using HanaToolbox.Logging;
|
||||||
|
using HanaToolbox.Services.Interfaces;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Services;
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Handles tenant backups and schema exports/imports.
|
||||||
|
/// All hdbsql and file-creation operations run as <sid>adm via IUserSwitcher.
|
||||||
|
/// Compression (tar/pigz) runs as <sid>adm so the archive is owned by the HANA user.
|
||||||
|
/// </summary>
|
||||||
|
public sealed class BackupService(
|
||||||
|
IUserSwitcher switcher,
|
||||||
|
IHdbClientLocator locator,
|
||||||
|
INotificationService ntfy,
|
||||||
|
AppLogger logger) : IBackupService
|
||||||
|
{
|
||||||
|
public async Task RunAsync(
|
||||||
|
BackupConfig config, HanaConfig hana, string sid,
|
||||||
|
CancellationToken ct = default)
|
||||||
|
{
|
||||||
|
var hdbsql = locator.LocateHdbsql(hana.HdbsqlPath, sid, hana.InstanceNumber);
|
||||||
|
var threads = ResolveThreads(config.Threads);
|
||||||
|
|
||||||
|
switch (config.Type)
|
||||||
|
{
|
||||||
|
case BackupType.Schema:
|
||||||
|
await RunSchemaExportsAsync(config, hdbsql, sid, threads, ct);
|
||||||
|
break;
|
||||||
|
case BackupType.Tenant:
|
||||||
|
await RunTenantBackupAsync(config, hdbsql, sid, ct);
|
||||||
|
break;
|
||||||
|
case BackupType.All:
|
||||||
|
await RunSchemaExportsAsync(config, hdbsql, sid, threads, ct);
|
||||||
|
await RunTenantBackupAsync(config, hdbsql, sid, ct);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── Public helpers used by ExportCommand / ImportCommand ─────────────────
|
||||||
|
|
||||||
|
public async Task ExportSchemaAsync(
|
||||||
|
string hdbsql, string userKey, string schema, string targetPath,
|
||||||
|
int threads, bool compress, string sid, CancellationToken ct)
|
||||||
|
{
|
||||||
|
logger.Step($"Exporting schema '{schema}' to '{targetPath}'...");
|
||||||
|
Directory.CreateDirectory(targetPath);
|
||||||
|
|
||||||
|
string exportDir = targetPath;
|
||||||
|
string? archivePath = null;
|
||||||
|
|
||||||
|
if (compress)
|
||||||
|
{
|
||||||
|
var tmpName = $"export_{schema}_{DateTime.Now:yyyyMMdd_HHmmss}";
|
||||||
|
exportDir = Path.Combine(targetPath, tmpName);
|
||||||
|
archivePath = Path.Combine(targetPath, $"{schema}_{DateTime.Now:yyyyMMdd_HHmmss}.tar.gz");
|
||||||
|
await RunAs(sid, $"mkdir -p \"{exportDir}\"", ct);
|
||||||
|
}
|
||||||
|
|
||||||
|
var sql = $"EXPORT \"{schema}\".\"*\" AS BINARY INTO '{exportDir}' WITH REPLACE THREADS {threads} NO DEPENDENCIES;";
|
||||||
|
var result = await RunHdbsqlAsync(hdbsql, userKey, sql, sid, ct);
|
||||||
|
if (!result.Success)
|
||||||
|
{
|
||||||
|
logger.Error($"Schema export failed for '{schema}': {result.StdErr}");
|
||||||
|
await ntfy.SendAsync("HANA Export Failed", $"Export of schema '{schema}' FAILED.", ct);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (compress && archivePath != null)
|
||||||
|
await CompressAsync(exportDir, archivePath, sid, threads, ct);
|
||||||
|
|
||||||
|
logger.Success($"Schema export of '{schema}' complete.");
|
||||||
|
await ntfy.SendAsync("HANA Export", $"Export of schema '{schema}' completed successfully.", ct);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// <summary>
/// Imports a schema from a binary export directory (or a .tar.gz archive when
/// <paramref name="compress"/> is true), optionally renaming it on import.
/// Failures are logged and notified via ntfy; the method does not throw on import failure.
/// </summary>
/// <param name="hdbsql">Resolved path to the hdbsql binary.</param>
/// <param name="userKey">hdbuserstore key used for authentication.</param>
/// <param name="schema">Schema name as stored in the export.</param>
/// <param name="sourcePath">Export directory, or archive path when compressed.</param>
/// <param name="threads">IMPORT ... THREADS value.</param>
/// <param name="compress">When true, <paramref name="sourcePath"/> is a tar.gz to extract first.</param>
/// <param name="replace">True → WITH REPLACE, false → WITH IGNORE EXISTING.</param>
/// <param name="newSchema">Optional target schema name (RENAME SCHEMA on import).</param>
/// <param name="sid">SAP system ID; shell commands run as &lt;sid&gt;adm.</param>
/// <param name="ct">Cancellation token.</param>
public async Task ImportSchemaAsync(
    string hdbsql, string userKey, string schema, string sourcePath,
    int threads, bool compress, bool replace, string? newSchema, string sid, CancellationToken ct)
{
    logger.Step($"Importing schema '{schema}'{(newSchema != null ? $" as '{newSchema}'" : "")}...");

    string importDir = sourcePath;
    string? tmpDir = null;

    try
    {
        if (compress)
        {
            tmpDir = Path.Combine("/tmp", $"import_{schema}_{Path.GetRandomFileName()}");
            await RunAs(sid, $"mkdir -p \"{tmpDir}\"", ct);
            var decompResult = await RunAs(sid, $"tar -xzf \"{sourcePath}\" -C \"{tmpDir}\" --strip-components=1", ct);
            if (!decompResult.Success)
            {
                logger.Error($"Decompression failed: {decompResult.StdErr}");
                // BUG FIX: previously this early return leaked tmpDir; the
                // finally block below now removes it on every exit path.
                return;
            }
            importDir = tmpDir;
        }

        var mode = replace ? "REPLACE" : "IGNORE EXISTING";
        var rename = newSchema != null ? $" RENAME SCHEMA \"{schema}\" TO \"{newSchema}\"" : string.Empty;
        var sql = $"IMPORT \"{schema}\".\"*\" AS BINARY FROM '{importDir}' WITH {mode}{rename} THREADS {threads};";

        var result = await RunHdbsqlAsync(hdbsql, userKey, sql, sid, ct);
        var target = newSchema ?? schema;

        if (!result.Success)
        {
            logger.Error($"Import failed: {result.StdErr}");
            await ntfy.SendAsync("HANA Import Failed", $"Import of '{schema}' to '{target}' FAILED.", ct);
        }
        else
        {
            logger.Success("Import complete.");
            await ntfy.SendAsync("HANA Import", $"Import of '{schema}' to '{target}' completed.", ct);
        }
    }
    finally
    {
        // Clean the extraction dir on success, failure, and exceptions alike.
        if (tmpDir != null)
            await RunAs(sid, $"rm -rf \"{tmpDir}\"", ct);
    }
}
|
||||||
|
|
||||||
|
// ── Private helpers ───────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
/// <summary>Exports every schema listed in the config to the schema backup path.</summary>
private async Task RunSchemaExportsAsync(
    BackupConfig config, string hdbsql, string sid, int threads, CancellationToken ct)
{
    foreach (var schemaName in config.SchemaNames)
    {
        await ExportSchemaAsync(hdbsql, config.UserKey, schemaName,
            config.SchemaBackupPath, threads, config.CompressSchema, sid, ct);
    }
}
|
||||||
|
|
||||||
|
/// <summary>
/// Backs up the tenant database, and additionally SYSTEMDB when enabled
/// in the config and a SYSTEMDB user key is present.
/// </summary>
private async Task RunTenantBackupAsync(
    BackupConfig config, string hdbsql, string sid, CancellationToken ct)
{
    // Tenant DB first.
    await BackupTenantAsync(hdbsql, config.UserKey, config.BackupBasePath,
        config.Compress, sid, ct);

    // SYSTEMDB only when both the flag and a key are configured.
    var includeSystemDb = config.BackupSystemDb
        && !string.IsNullOrWhiteSpace(config.SystemDbUserKey);
    if (includeSystemDb)
    {
        await BackupTenantAsync(hdbsql, config.SystemDbUserKey, config.BackupBasePath,
            config.Compress, sid, ct);
    }
}
|
||||||
|
|
||||||
|
/// <summary>
/// Runs a full data backup (BACKUP DATA USING FILE) into <paramref name="basePath"/>,
/// optionally compressing the backup directory into a .tar.gz.
/// Failures are logged and notified via ntfy; a failed compressed backup's temp dir is removed.
/// </summary>
/// <param name="hdbsql">Resolved path to the hdbsql binary.</param>
/// <param name="userKey">hdbuserstore key (tenant or SYSTEMDB) used for authentication.</param>
/// <param name="basePath">Directory receiving the backup files/archive.</param>
/// <param name="compress">When true, back up into a temp dir and tar-gzip it.</param>
/// <param name="sid">SAP system ID; shell commands run as &lt;sid&gt;adm.</param>
/// <param name="ct">Cancellation token.</param>
private async Task BackupTenantAsync(
    string hdbsql, string userKey, string basePath,
    bool compress, string sid, CancellationToken ct)
{
    logger.Step("Starting tenant backup...");
    var ts = DateTime.Now.ToString("yyyyMMdd_HHmmss");

    // When compressing, back up into a per-run subdirectory so the whole
    // tree can be archived and deleted; otherwise write directly to basePath.
    string backupDir = compress ? Path.Combine(basePath, $"backup_{ts}") : basePath;
    string? archivePath = compress ? Path.Combine(basePath, $"backup_{ts}.tar.gz") : null;

    // FIX: both branches of the old if/else issued the identical mkdir;
    // collapsed into a single unconditional call.
    await RunAs(sid, $"mkdir -p \"{backupDir}\"", ct);

    var prefix = Path.Combine(backupDir, $"backup_{ts}");
    var sql = $"BACKUP DATA USING FILE ('{prefix}');";
    var result = await RunHdbsqlAsync(hdbsql, userKey, sql, sid, ct);

    if (!result.Success)
    {
        logger.Error($"Tenant backup failed: {result.StdErr}");
        await ntfy.SendAsync("HANA Backup Failed", "Tenant backup FAILED.", ct);
        // Only remove the dir we created exclusively for this run.
        if (compress) await RunAs(sid, $"rm -rf \"{backupDir}\"", ct);
        return;
    }

    if (compress && archivePath != null)
        await CompressAsync(backupDir, archivePath, sid, 0, ct);

    logger.Success("Tenant backup complete.");
    await ntfy.SendAsync("HANA Backup", "Tenant backup completed successfully.", ct);
}
|
||||||
|
|
||||||
|
/// <summary>
/// Tar-gzips <paramref name="sourceDir"/> into <paramref name="archivePath"/> and,
/// on success, deletes the source directory. Prefers pigz (parallel gzip) when
/// available on the target host, falling back to plain gzip.
/// </summary>
/// <param name="threads">pigz thread count; 0 means auto (half the CPUs, min 1).</param>
private async Task CompressAsync(
    string sourceDir, string archivePath, string sid, int threads, CancellationToken ct)
{
    logger.Step($"Compressing '{sourceDir}' → '{archivePath}'...");

    // Probe for pigz on the target host.
    var pigzProbe = await RunAs(sid, "which pigz 2>/dev/null", ct);
    var havePigz = pigzProbe.Success && !string.IsNullOrWhiteSpace(pigzProbe.StdOut);

    string tarCmd;
    if (havePigz)
    {
        var pigzThreads = threads > 0 ? threads : ResolveThreads(0);
        tarCmd = $"tar -I \"pigz -p {pigzThreads}\" -cf \"{archivePath}\" -C \"{sourceDir}\" .";
    }
    else
    {
        tarCmd = $"tar -czf \"{archivePath}\" -C \"{sourceDir}\" .";
    }

    var tarResult = await RunAs(sid, tarCmd, ct);
    if (!tarResult.Success)
    {
        logger.Error($"Compression failed: {tarResult.StdErr}");
        return;
    }

    // Archive written successfully — remove the uncompressed source tree.
    await RunAs(sid, $"rm -rf \"{sourceDir}\"", ct);
    logger.Success("Compression complete.");
}
|
||||||
|
|
||||||
|
/// <summary>
/// Executes a SQL statement via hdbsql as &lt;sid&gt;adm. The SQL is written to a
/// temp file (-I) so no shell-quoting complications arise; stderr is merged
/// into stdout by the 2&gt;&amp;1 redirect.
/// </summary>
/// <returns>The process result of the hdbsql invocation.</returns>
private async Task<ProcessResult> RunHdbsqlAsync(
    string hdbsql, string userKey, string sql, string sid, CancellationToken ct)
{
    var tmpFile = Path.Combine("/tmp", $"ht_{Guid.NewGuid():N}.sql");
    await File.WriteAllTextAsync(tmpFile, sql, ct);

    try
    {
        // chmod so ndbadm can read it (file was written by the current user).
        await switcher.RunAsAsync(sid, $"chmod 644 \"{tmpFile}\" 2>/dev/null; true", ct);

        return await switcher.RunAsAsync(sid,
            $"\"{hdbsql}\" -U {userKey} -I \"{tmpFile}\" 2>&1", ct);
    }
    finally
    {
        // BUG FIX: previously the temp file leaked whenever RunAsAsync threw
        // (e.g. on cancellation); deleting in finally covers every exit path.
        File.Delete(tmpFile);
    }
}
|
||||||
|
|
||||||
|
/// <summary>Shorthand: run a shell command as &lt;sid&gt;adm via the user switcher.</summary>
private Task<ProcessResult> RunAs(string sid, string cmd, CancellationToken ct)
{
    return switcher.RunAsAsync(sid, cmd, ct);
}
|
||||||
|
|
||||||
|
/// <summary>
/// Returns the configured thread count when positive; otherwise defaults to
/// half the logical CPU count, never less than 1.
/// </summary>
private static int ResolveThreads(int configured)
{
    if (configured > 0)
        return configured;
    return Math.Max(1, Environment.ProcessorCount / 2);
}
|
||||||
|
}
|
||||||
68
Services/CleanerService.cs
Normal file
68
Services/CleanerService.cs
Normal file
@@ -0,0 +1,68 @@
|
|||||||
|
using HanaToolbox.Config;
|
||||||
|
using HanaToolbox.Logging;
|
||||||
|
using HanaToolbox.Services.Interfaces;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Services;
|
||||||
|
|
||||||
|
/// <summary>
/// Deletes backup and log files older than their configured retention period.
/// Runs as root (no user switching needed for file deletion).
/// </summary>
public sealed class CleanerService(AppLogger logger) : ICleanerService
{
    /// <summary>
    /// Cleans the tenant backup directory, then each configured log backup
    /// directory. Individual file failures are logged and skipped.
    /// </summary>
    public Task RunAsync(CleanerConfig config, CancellationToken ct = default)
    {
        logger.Step($"Cleaning tenant backup path: {config.TenantBackupPath} (retention: {config.TenantRetentionDays}d)");
        CleanDirectory(config.TenantBackupPath, config.TenantRetentionDays);

        foreach (var logPath in config.LogBackupPaths)
        {
            ct.ThrowIfCancellationRequested();
            logger.Step($"Cleaning log backup path: {logPath} (retention: {config.LogRetentionDays}d)");
            CleanDirectory(logPath, config.LogRetentionDays);
        }

        logger.Success("Cleanup complete.");
        return Task.CompletedTask;
    }

    /// <summary>
    /// Deletes top-level files in <paramref name="directory"/> whose last write
    /// time (UTC) is older than <paramref name="retentionDays"/> days.
    /// Subdirectories are not descended into (TopDirectoryOnly).
    /// </summary>
    private void CleanDirectory(string directory, int retentionDays)
    {
        if (!Directory.Exists(directory))
        {
            logger.Warning($"Directory not found, skipping: {directory}");
            return;
        }

        var threshold = DateTime.UtcNow.AddDays(-retentionDays);
        var removedCount = 0;

        try
        {
            foreach (var candidate in Directory.EnumerateFiles(directory, "*", SearchOption.TopDirectoryOnly))
            {
                try
                {
                    // Guard clause: keep files newer than the cutoff.
                    if (File.GetLastWriteTimeUtc(candidate) >= threshold)
                        continue;

                    File.Delete(candidate);
                    removedCount++;
                    logger.Info($"Deleted: {candidate}");
                }
                catch (Exception ex)
                {
                    // Best effort: a locked/protected file must not stop the sweep.
                    logger.Warning($"Could not delete '{candidate}': {ex.Message}");
                }
            }
        }
        catch (Exception ex)
        {
            logger.Error($"Error enumerating '{directory}': {ex.Message}");
        }

        logger.Info($"Deleted {removedCount} file(s) from '{directory}'.");
    }
}
|
||||||
38
Services/FileMonitorStateService.cs
Normal file
38
Services/FileMonitorStateService.cs
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
using HanaToolbox.Logging;
|
||||||
|
using HanaToolbox.Services.Interfaces;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Services;
|
||||||
|
|
||||||
|
/// <summary>
/// File-based alert state persistence.
/// Each key maps to /etc/hanatoolbox/state/&lt;key&gt;.state containing a plain-text value.
/// </summary>
public sealed class FileMonitorStateService(AppLogger logger) : IMonitorStateService
{
    // Resolved once from the config service; all state files live here.
    private static readonly string StateDir = Config.ConfigService.StateDirectory;

    /// <summary>Reads the stored value for <paramref name="key"/>; null when absent or unreadable.</summary>
    public string? GetState(string key)
    {
        var file = StatePath(key);
        if (!File.Exists(file))
            return null;

        try
        {
            return File.ReadAllText(file).Trim();
        }
        catch
        {
            // Best effort: an unreadable state file counts the same as "no state".
            return null;
        }
    }

    /// <summary>Writes <paramref name="value"/> for <paramref name="key"/>; failures are logged, never thrown.</summary>
    public void SetState(string key, string value)
    {
        try
        {
            Directory.CreateDirectory(StateDir);
            File.WriteAllText(StatePath(key), value);
        }
        catch (Exception ex)
        {
            logger.Warning($"Could not write state for '{key}': {ex.Message}");
        }
    }

    // Maps a logical key to its backing file path.
    private static string StatePath(string key) =>
        Path.Combine(StateDir, $"{key}.state");
}
|
||||||
101
Services/FirewallService.cs
Normal file
101
Services/FirewallService.cs
Normal file
@@ -0,0 +1,101 @@
|
|||||||
|
using HanaToolbox.Config;
|
||||||
|
using HanaToolbox.Logging;
|
||||||
|
using HanaToolbox.Services.Interfaces;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Services;
|
||||||
|
|
||||||
|
/// <summary>
/// Applies firewall rules via firewall-cmd (firewalld).
/// Used by CronOrchestrator (non-interactive apply) and FirewallTui (interactive).
/// </summary>
public sealed class FirewallService(IProcessRunner runner, AppLogger logger) : IFirewallService
{
    /// <summary>
    /// Applies the saved firewall configuration non-interactively: optionally
    /// flushes existing rules, opens/restricts ports per service decision,
    /// then persists the runtime configuration.
    /// </summary>
    public async Task ApplyAsync(FirewallConfig config, CancellationToken ct = default)
    {
        logger.Step("Applying firewall rules (firewall-cmd)...");

        if (config.FlushBeforeApply)
        {
            logger.Step("Flushing existing rules...");
            await FlushAsync(ct);
        }
        else
        {
            // Remove insecure catch-all rules as a safety measure
            await Cmd("--remove-port=0-65535/tcp", ct);
            await Cmd("--remove-service=ssh", ct);
            await Cmd("--remove-port=22/tcp", ct);
        }

        foreach (var svc in config.Services)
        {
            ct.ThrowIfCancellationRequested();

            switch (svc.Decision)
            {
                case FirewallDecision.All:
                    foreach (var port in svc.Ports)
                    {
                        logger.Step($"Opening port {port}/tcp globally ({svc.Name})");
                        await Cmd($"--add-port={port}/tcp", ct);
                    }
                    break;

                case FirewallDecision.Ip:
                    foreach (var ip in svc.AllowedIps)
                    {
                        foreach (var port in svc.Ports)
                        {
                            logger.Step($"Restricting port {port}/tcp to {ip} ({svc.Name})");
                            await Cmd(
                                $"--add-rich-rule=rule family='ipv4' source address='{ip}' port port='{port}' protocol='tcp' accept",
                                ct);
                        }
                    }
                    break;

                case FirewallDecision.Skip:
                    logger.Info($"Skipping {svc.Name}");
                    break;
            }
        }

        // Save runtime config to permanent
        logger.Step("Saving rules permanently...");
        await Cmd("--runtime-to-permanent", ct);
        logger.Success("Firewall rules applied.");
    }

    /// <summary>Removes all currently active services, ports, and rich rules.</summary>
    private async Task FlushAsync(CancellationToken ct)
    {
        // list and remove all services
        var services = (await RunCmd("--list-services", ct)).StdOut.Trim();
        foreach (var s in services.Split(' ', StringSplitOptions.RemoveEmptyEntries))
            await Cmd($"--remove-service={s}", ct);

        // list and remove all ports
        var ports = (await RunCmd("--list-ports", ct)).StdOut.Trim();
        foreach (var p in ports.Split(' ', StringSplitOptions.RemoveEmptyEntries))
            await Cmd($"--remove-port={p}", ct);

        // list and remove rich rules (one rule per output line)
        var richRules = (await RunCmd("--list-rich-rules", ct)).StdOut.Trim();
        foreach (var rule in richRules.Split('\n', StringSplitOptions.RemoveEmptyEntries))
        {
            if (!string.IsNullOrWhiteSpace(rule))
                await Cmd($"--remove-rich-rule={rule.Trim()}", ct);
        }
    }

    private Task<ProcessResult> Cmd(string args, CancellationToken ct) =>
        RunCmd(args, ct);

    private Task<ProcessResult> RunCmd(string args, CancellationToken ct)
    {
        // BUG FIX: `args` was previously interpolated into the bash command line
        // unquoted, so rich rules (which contain spaces) were word-split into
        // several arguments and firewall-cmd received a broken rule. Each call
        // site passes exactly one firewall-cmd argument, so quote it as a single
        // word and escape the metacharacters bash still interprets inside
        // double quotes (backslash, double quote, dollar, backtick).
        var escaped = args
            .Replace("\\", "\\\\")
            .Replace("\"", "\\\"")
            .Replace("$", "\\$")
            .Replace("`", "\\`");
        return runner.RunAsync("/bin/bash", ["-c", $"firewall-cmd \"{escaped}\""], ct);
    }
}
|
||||||
42
Services/HdbClientLocator.cs
Normal file
42
Services/HdbClientLocator.cs
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
using HanaToolbox.Services.Interfaces;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Services;
|
||||||
|
|
||||||
|
/// <summary>
/// Locates hdbsql and hdbuserstore binaries using:
/// 1. Value from config (if set)
/// 2. `which` lookup on PATH
/// 3. /usr/sap/hdbclient/
/// 4. /usr/sap/&lt;SID&gt;/HDB&lt;instance&gt;/exe/
/// </summary>
public sealed class HdbClientLocator(IProcessRunner runner) : IHdbClientLocator
{
    /// <summary>Resolves the path to hdbsql; throws <see cref="FileNotFoundException"/> if not found.</summary>
    public string LocateHdbsql(string? configuredPath, string sid, string instanceNumber) =>
        Locate("hdbsql", configuredPath, sid, instanceNumber);

    /// <summary>Resolves the path to hdbuserstore; throws <see cref="FileNotFoundException"/> if not found.</summary>
    public string LocateHdbuserstore(string? configuredPath, string sid, string instanceNumber) =>
        Locate("hdbuserstore", configuredPath, sid, instanceNumber);

    // Shared resolution chain for both client binaries.
    private string Locate(string binary, string? configuredPath, string sid, string instanceNumber)
    {
        // 1. User-configured explicit path wins when it exists.
        if (!string.IsNullOrWhiteSpace(configuredPath) && File.Exists(configuredPath))
            return configuredPath;

        // 2. PATH lookup via `which`.
        // NOTE(review): sync-over-async block here is forced by the synchronous
        // IHdbClientLocator interface — confirm this is never called on a
        // context-bound thread.
        var whichResult = runner.RunAsync("/usr/bin/which", [binary]).GetAwaiter().GetResult();
        if (whichResult.Success && !string.IsNullOrWhiteSpace(whichResult.StdOut))
            return whichResult.StdOut.Split('\n')[0].Trim();

        // 3 + 4. Well-known SAP install locations, checked in order.
        string[] candidates =
        [
            $"/usr/sap/hdbclient/{binary}",
            $"/usr/sap/{sid.ToUpperInvariant()}/HDB{instanceNumber}/exe/{binary}",
        ];
        foreach (var candidate in candidates)
        {
            if (File.Exists(candidate))
                return candidate;
        }

        throw new FileNotFoundException(
            $"Could not locate '{binary}'. Set HdbsqlPath/HdbuserstorePath in hanatoolbox.json or ensure it is on PATH.");
    }
}
|
||||||
8
Services/Interfaces/IAuroraService.cs
Normal file
8
Services/Interfaces/IAuroraService.cs
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
using HanaToolbox.Config;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Services.Interfaces;
|
||||||
|
|
||||||
|
/// <summary>Entry point for the Aurora feature (behavior defined by the implementing service).</summary>
public interface IAuroraService
{
    /// <summary>Runs the Aurora task for the given SID using the supplied configuration.</summary>
    Task RunAsync(AuroraConfig config, HanaConfig hana, string sid, CancellationToken ct = default);
}
|
||||||
8
Services/Interfaces/IBackupService.cs
Normal file
8
Services/Interfaces/IBackupService.cs
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
using HanaToolbox.Config;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Services.Interfaces;
|
||||||
|
|
||||||
|
/// <summary>Runs HANA backups (schema exports and/or tenant backups) per configuration.</summary>
public interface IBackupService
{
    /// <summary>Executes the configured backup for the given SID.</summary>
    Task RunAsync(BackupConfig config, HanaConfig hana, string sid, CancellationToken ct = default);
}
|
||||||
8
Services/Interfaces/ICleanerService.cs
Normal file
8
Services/Interfaces/ICleanerService.cs
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
using HanaToolbox.Config;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Services.Interfaces;
|
||||||
|
|
||||||
|
/// <summary>Removes backup/log files older than their configured retention.</summary>
public interface ICleanerService
{
    /// <summary>Runs the retention cleanup using the supplied configuration.</summary>
    Task RunAsync(CleanerConfig config, CancellationToken ct = default);
}
|
||||||
9
Services/Interfaces/IFirewallService.cs
Normal file
9
Services/Interfaces/IFirewallService.cs
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
using HanaToolbox.Config;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Services.Interfaces;
|
||||||
|
|
||||||
|
/// <summary>Applies firewalld rules from a saved configuration.</summary>
public interface IFirewallService
{
    /// <summary>Applies the saved firewall config non-interactively (cron mode).</summary>
    Task ApplyAsync(FirewallConfig config, CancellationToken ct = default);
}
|
||||||
10
Services/Interfaces/IHdbClientLocator.cs
Normal file
10
Services/Interfaces/IHdbClientLocator.cs
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
namespace HanaToolbox.Services.Interfaces;
|
||||||
|
|
||||||
|
/// <summary>Resolves filesystem paths to the HANA client binaries.</summary>
public interface IHdbClientLocator
{
    /// <summary>Returns the resolved path to hdbsql. Throws if not found.</summary>
    string LocateHdbsql(string? configuredPath, string sid, string instanceNumber);

    /// <summary>Returns the resolved path to hdbuserstore. Throws if not found.</summary>
    string LocateHdbuserstore(string? configuredPath, string sid, string instanceNumber);
}
|
||||||
15
Services/Interfaces/IKeyManagerService.cs
Normal file
15
Services/Interfaces/IKeyManagerService.cs
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
namespace HanaToolbox.Services.Interfaces;
|
||||||
|
|
||||||
|
/// <summary>Manages hdbuserstore keys (create/delete/list/test) for a given SID.</summary>
public interface IKeyManagerService
{
    /// <summary>Creates (or overwrites) a key. Returns true on success.</summary>
    Task<bool> CreateKeyAsync(
        string keyName, string connectionString,
        string user, string password,
        string sid, CancellationToken ct = default);

    /// <summary>Deletes a key. Returns true on success.</summary>
    Task<bool> DeleteKeyAsync(string keyName, string sid, CancellationToken ct = default);

    /// <summary>Lists all known key names (empty list on failure).</summary>
    Task<IReadOnlyList<string>> ListKeysAsync(string sid, CancellationToken ct = default);

    /// <summary>Verifies a key can connect via hdbsql. Returns true on success.</summary>
    Task<bool> TestKeyAsync(string hdbsqlPath, string keyName, string sid, CancellationToken ct = default);
}
|
||||||
8
Services/Interfaces/IMonitorService.cs
Normal file
8
Services/Interfaces/IMonitorService.cs
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
using HanaToolbox.Config;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Services.Interfaces;
|
||||||
|
|
||||||
|
/// <summary>Runs HANA health checks and sends alert notifications.</summary>
public interface IMonitorService
{
    /// <summary>Executes the configured monitoring checks for the given SID.</summary>
    Task RunAsync(MonitorConfig config, HanaConfig hana, string sid, CancellationToken ct = default);
}
|
||||||
7
Services/Interfaces/IMonitorStateService.cs
Normal file
7
Services/Interfaces/IMonitorStateService.cs
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
namespace HanaToolbox.Services.Interfaces;
|
||||||
|
|
||||||
|
/// <summary>Persists per-key alert state between monitor runs (used to avoid alert spam).</summary>
public interface IMonitorStateService
{
    /// <summary>Returns the stored value for <paramref name="key"/>, or null when none exists.</summary>
    string? GetState(string key);

    /// <summary>Stores <paramref name="value"/> under <paramref name="key"/>.</summary>
    void SetState(string key, string value);
}
|
||||||
6
Services/Interfaces/INotificationService.cs
Normal file
6
Services/Interfaces/INotificationService.cs
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
namespace HanaToolbox.Services.Interfaces;
|
||||||
|
|
||||||
|
/// <summary>Sends push notifications (ntfy-based in this project).</summary>
public interface INotificationService
{
    /// <summary>Sends a notification with the given title and message body.</summary>
    Task SendAsync(string title, string message, CancellationToken ct = default);
}
|
||||||
13
Services/Interfaces/IProcessRunner.cs
Normal file
13
Services/Interfaces/IProcessRunner.cs
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
namespace HanaToolbox.Services.Interfaces;
|
||||||
|
|
||||||
|
/// <summary>Outcome of a spawned process: exit code plus captured stdout/stderr.</summary>
public sealed record ProcessResult(int ExitCode, string StdOut, string StdErr)
{
    /// <summary>True exactly when the process exited with code 0.</summary>
    public bool Success => ExitCode is 0;
}
|
||||||
|
|
||||||
|
/// <summary>Executes an external program and captures its exit code and output streams.</summary>
public interface IProcessRunner
{
    /// <summary>Runs <paramref name="executable"/> with <paramref name="args"/> and returns the result.</summary>
    Task<ProcessResult> RunAsync(
        string executable, string[] args,
        CancellationToken ct = default);
}
|
||||||
14
Services/Interfaces/IUserSwitcher.cs
Normal file
14
Services/Interfaces/IUserSwitcher.cs
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
using HanaToolbox.Services.Interfaces;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Services.Interfaces;
|
||||||
|
|
||||||
|
public interface IUserSwitcher
{
    /// <summary>
    /// Executes a shell command string as &lt;sid&gt;adm using `su - &lt;sid&gt;adm -c`.
    /// If already running as &lt;sid&gt;adm, runs the command directly.
    /// </summary>
    Task<ProcessResult> RunAsAsync(
        string sid, string shellCommand,
        CancellationToken ct = default);
}
|
||||||
74
Services/KeyManagerService.cs
Normal file
74
Services/KeyManagerService.cs
Normal file
@@ -0,0 +1,74 @@
|
|||||||
|
using HanaToolbox.Logging;
|
||||||
|
using HanaToolbox.Services.Interfaces;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Services;
|
||||||
|
|
||||||
|
/// <summary>
/// Wraps hdbuserstore operations using SuUserSwitcher to run as &lt;sid&gt;adm.
/// Keys stored in the OS user's home directory (~/.hdbusers/).
/// </summary>
public sealed class KeyManagerService(
    IUserSwitcher switcher,
    IHdbClientLocator locator,
    AppLogger logger) : IKeyManagerService
{
    /// <summary>Creates (or overwrites) an hdbuserstore key. Returns true on success.</summary>
    // NOTE(review): the password is interpolated into the shell command line, so it
    // is briefly visible in the process list and would break if it contains a
    // double quote or '$' — confirm whether this is acceptable here.
    // NOTE(review): instance number is hard-coded to "00" for the binary lookup.
    public async Task<bool> CreateKeyAsync(
        string keyName, string connectionString,
        string user, string password,
        string sid, CancellationToken ct = default)
    {
        var storeBinary = locator.LocateHdbuserstore(null, sid, "00");
        var setResult = await switcher.RunAsAsync(sid,
            $"\"{storeBinary}\" SET \"{keyName}\" \"{connectionString}\" \"{user}\" \"{password}\"", ct);

        if (!setResult.Success)
        {
            logger.Error($"Failed to create key '{keyName}': {setResult.StdErr}");
            return false;
        }

        logger.Success($"Key '{keyName}' created.");
        return true;
    }

    /// <summary>Deletes an hdbuserstore key. Returns true on success.</summary>
    public async Task<bool> DeleteKeyAsync(string keyName, string sid, CancellationToken ct = default)
    {
        var storeBinary = locator.LocateHdbuserstore(null, sid, "00");
        var delResult = await switcher.RunAsAsync(sid,
            $"\"{storeBinary}\" DELETE \"{keyName}\"", ct);

        if (!delResult.Success)
        {
            logger.Error($"Failed to delete key '{keyName}': {delResult.StdErr}");
            return false;
        }

        logger.Success($"Key '{keyName}' deleted.");
        return true;
    }

    /// <summary>Lists all key names known to hdbuserstore; empty list when LIST fails.</summary>
    public async Task<IReadOnlyList<string>> ListKeysAsync(string sid, CancellationToken ct = default)
    {
        var storeBinary = locator.LocateHdbuserstore(null, sid, "00");
        var listResult = await switcher.RunAsAsync(sid, $"\"{storeBinary}\" LIST", ct);

        if (!listResult.Success)
            return [];

        // Parse lines like: "KEY mykey" from hdbuserstore LIST output
        var keys = new List<string>();
        foreach (var line in listResult.StdOut.Split('\n', StringSplitOptions.RemoveEmptyEntries))
        {
            if (!line.TrimStart().StartsWith("KEY "))
                continue;
            keys.Add(line.Trim()[4..].Trim()); // strip "KEY "
        }
        return keys;
    }

    /// <summary>Runs a trivial SELECT through the key to verify connectivity.</summary>
    public async Task<bool> TestKeyAsync(
        string hdbsqlPath, string keyName, string sid, CancellationToken ct = default)
    {
        var testResult = await switcher.RunAsAsync(sid,
            $"\"{hdbsqlPath}\" -U \"{keyName}\" \"SELECT 'Connection successful' FROM DUMMY\"", ct);

        var passed = testResult.Success && testResult.StdOut.Contains("Connection successful");
        if (passed)
            logger.Success($"Key '{keyName}' connection test passed.");
        else
            logger.Error($"Key '{keyName}' connection test failed: {testResult.StdErr}");
        return passed;
    }
}
|
||||||
229
Services/MonitorService.cs
Normal file
229
Services/MonitorService.cs
Normal file
@@ -0,0 +1,229 @@
|
|||||||
|
using HanaToolbox.Config;
|
||||||
|
using HanaToolbox.Logging;
|
||||||
|
using HanaToolbox.Services.Interfaces;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Services;
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Monitors HANA health: process status, disk usage, log segments, statement queue, backup age.
|
||||||
|
/// Sends state-change notifications via ntfy to avoid alert spam.
|
||||||
|
/// </summary>
|
||||||
|
public sealed class MonitorService(
|
||||||
|
IProcessRunner runner,
|
||||||
|
IUserSwitcher switcher,
|
||||||
|
IHdbClientLocator locator,
|
||||||
|
INotificationService ntfy,
|
||||||
|
IMonitorStateService state,
|
||||||
|
AppLogger logger) : IMonitorService
|
||||||
|
{
|
||||||
|
public async Task RunAsync(
|
||||||
|
MonitorConfig config, HanaConfig hana, string sid,
|
||||||
|
CancellationToken ct = default)
|
||||||
|
{
|
||||||
|
var hdbsql = locator.LocateHdbsql(hana.HdbsqlPath, sid, hana.InstanceNumber);
|
||||||
|
var host = System.Net.Dns.GetHostName();
|
||||||
|
var prefix = $"[{config.CompanyName} | {host}]";
|
||||||
|
|
||||||
|
// 1. HANA processes (sapcontrol runs as root)
|
||||||
|
logger.Step("Checking HANA processes...");
|
||||||
|
var sapResult = await runner.RunAsync(
|
||||||
|
config.SapcontrolPath,
|
||||||
|
["-nr", config.HanaInstanceNumber, "-function", "GetProcessList"], ct);
|
||||||
|
|
||||||
|
var nonGreen = sapResult.StdOut
|
||||||
|
.Split('\n', StringSplitOptions.RemoveEmptyEntries)
|
||||||
|
.Skip(5) // skip header lines
|
||||||
|
.Where(l => !l.Contains("GREEN"))
|
||||||
|
.ToList();
|
||||||
|
|
||||||
|
if (nonGreen.Count > 0)
|
||||||
|
{
|
||||||
|
var msg = string.Join(", ", nonGreen.Select(l => l.Trim()));
|
||||||
|
await NotifyIfChanged("hana_processes", "HANA Process",
|
||||||
|
$"{prefix} One or more HANA processes are not GREEN: {msg}",
|
||||||
|
isAlert: true, currentVal: $"ALERT:{msg}", ct);
|
||||||
|
return; // Exit early — other checks may also fail
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
await NotifyIfChanged("hana_processes", "HANA Process",
|
||||||
|
$"{prefix} All HANA processes are GREEN.",
|
||||||
|
isAlert: false, currentVal: "OK", ct);
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2. Disk usage
|
||||||
|
logger.Step("Checking disk usage...");
|
||||||
|
foreach (var dir in config.DirectoriesToMonitor)
|
||||||
|
{
|
||||||
|
ct.ThrowIfCancellationRequested();
|
||||||
|
var dfResult = await runner.RunAsync("/bin/df", ["-h", dir], ct);
|
||||||
|
var usageStr = dfResult.StdOut
|
||||||
|
.Split('\n', StringSplitOptions.RemoveEmptyEntries)
|
||||||
|
.Skip(1).FirstOrDefault()
|
||||||
|
?.Split(' ', StringSplitOptions.RemoveEmptyEntries)
|
||||||
|
.ElementAtOrDefault(4)
|
||||||
|
?.TrimEnd('%');
|
||||||
|
|
||||||
|
if (!int.TryParse(usageStr, out var usage))
|
||||||
|
{
|
||||||
|
logger.Warning($"Could not parse disk usage for '{dir}'.");
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
var key = $"disk_{dir.Replace('/', '_')}";
|
||||||
|
if (usage > config.DiskUsageThresholdPercent)
|
||||||
|
{
|
||||||
|
await NotifyIfChanged(key, "HANA Disk",
|
||||||
|
$"{prefix} Disk usage for '{dir}' is at {usage}% (threshold: {config.DiskUsageThresholdPercent}%).",
|
||||||
|
isAlert: true, currentVal: $"{usage}%", ct);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
await NotifyIfChanged(key, "HANA Disk",
|
||||||
|
$"{prefix} Disk '{dir}' is at {usage}% (OK).",
|
||||||
|
isAlert: false, currentVal: "OK", ct);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// 3. Log segments
|
||||||
|
logger.Step("Checking HANA log segments...");
|
||||||
|
var segSql = "SELECT b.host, b.service_name, a.state, count(*) " +
|
||||||
|
"FROM PUBLIC.M_LOG_SEGMENTS a " +
|
||||||
|
"JOIN PUBLIC.M_SERVICES b ON (a.host = b.host AND a.port = b.port) " +
|
||||||
|
"GROUP BY b.host, b.service_name, a.state;";
|
||||||
|
|
||||||
|
var segResult = await RunSql(hdbsql, config.HanaUserKey, segSql, sid, false, ct);
|
||||||
|
|
||||||
|
int total = 0, truncated = 0, free = 0;
|
||||||
|
foreach (var line in segResult.StdOut.Split('\n', StringSplitOptions.RemoveEmptyEntries))
|
||||||
|
{
|
||||||
|
if (line.Contains("host") || line.Contains("HOST")) continue;
|
||||||
|
var parts = line.Replace("\"", "").Split(',');
|
||||||
|
if (parts.Length < 4) continue;
|
||||||
|
if (!int.TryParse(parts[3].Trim(), out var cnt)) continue;
|
||||||
|
total += cnt;
|
||||||
|
var seg = parts[2].Trim();
|
||||||
|
if (seg == "Truncated") truncated += cnt;
|
||||||
|
else if (seg == "Free") free += cnt;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (total > 0)
|
||||||
|
{
|
||||||
|
var truncPct = truncated * 100 / total;
|
||||||
|
var freePct = free * 100 / total;
|
||||||
|
|
||||||
|
if (truncPct > config.TruncatedSegmentThresholdPercent)
|
||||||
|
await NotifyIfChanged("hana_log_truncated", "HANA Log Segment",
|
||||||
|
$"{prefix} {truncPct}% of log segments are 'Truncated' (threshold: {config.TruncatedSegmentThresholdPercent}%).",
|
||||||
|
isAlert: true, currentVal: $"{truncPct}%", ct);
|
||||||
|
else
|
||||||
|
await NotifyIfChanged("hana_log_truncated", "HANA Log Segment",
|
||||||
|
$"{prefix} Log segments OK ({truncPct}% truncated).",
|
||||||
|
isAlert: false, currentVal: "OK", ct);
|
||||||
|
|
||||||
|
if (freePct < config.FreeSegmentThresholdPercent)
|
||||||
|
await NotifyIfChanged("hana_log_free", "HANA Log Segment",
|
||||||
|
$"{prefix} Only {freePct}% of log segments are 'Free' (threshold: {config.FreeSegmentThresholdPercent}%).",
|
||||||
|
isAlert: true, currentVal: $"{freePct}%", ct);
|
||||||
|
else
|
||||||
|
await NotifyIfChanged("hana_log_free", "HANA Log Segment",
|
||||||
|
$"{prefix} Free log segments OK ({freePct}%).",
|
||||||
|
isAlert: false, currentVal: "OK", ct);
|
||||||
|
}
|
||||||
|
|
||||||
|
// 4. Statement queue
|
||||||
|
logger.Step("Checking HANA statement queue...");
|
||||||
|
var queueSql = "SELECT COUNT(*) FROM M_SERVICE_THREADS " +
|
||||||
|
"WHERE THREAD_TYPE = 'SqlExecutor' AND THREAD_STATE = 'Queueing';";
|
||||||
|
var queueResult = await RunSql(hdbsql, config.HanaUserKey, queueSql, sid, scalar: true, ct);
|
||||||
|
|
||||||
|
if (int.TryParse(queueResult.StdOut.Trim().Replace("\"", ""), out var queueCount))
|
||||||
|
{
|
||||||
|
var breachStr = state.GetState("statement_queue_breach_count") ?? "0";
|
||||||
|
var breachCount = int.TryParse(breachStr, out var b) ? b : 0;
|
||||||
|
|
||||||
|
if (queueCount > config.StatementQueueThreshold)
|
||||||
|
breachCount++;
|
||||||
|
else
|
||||||
|
breachCount = 0;
|
||||||
|
|
||||||
|
state.SetState("statement_queue_breach_count", breachCount.ToString());
|
||||||
|
|
||||||
|
if (breachCount >= config.StatementQueueConsecutiveRuns)
|
||||||
|
await NotifyIfChanged("hana_statement_queue", "HANA Statement Queue",
|
||||||
|
$"{prefix} Statement queue has been over {config.StatementQueueThreshold} for {breachCount} checks. Current: {queueCount}.",
|
||||||
|
isAlert: true, currentVal: $"ALERT:{queueCount}", ct);
|
||||||
|
else
|
||||||
|
await NotifyIfChanged("hana_statement_queue", "HANA Statement Queue",
|
||||||
|
$"{prefix} Statement queue is normal ({queueCount}).",
|
||||||
|
isAlert: false, currentVal: "OK", ct);
|
||||||
|
}
|
||||||
|
|
||||||
|
// 5. Backup age
|
||||||
|
logger.Step("Checking last successful backup age...");
|
||||||
|
var bakSql = "SELECT TOP 1 SYS_START_TIME FROM M_BACKUP_CATALOG " +
|
||||||
|
"WHERE ENTRY_TYPE_NAME = 'complete data backup' AND STATE_NAME = 'successful' " +
|
||||||
|
"ORDER BY SYS_START_TIME DESC;";
|
||||||
|
var bakResult = await RunSql(hdbsql, config.HanaUserKey, bakSql, sid, scalar: true, ct);
|
||||||
|
var bakDateStr = bakResult.StdOut.Trim().Replace("\"", "").Split('.')[0];
|
||||||
|
|
||||||
|
if (string.IsNullOrWhiteSpace(bakDateStr) || !DateTime.TryParse(bakDateStr, out var lastBak))
|
||||||
|
{
|
||||||
|
await NotifyIfChanged("hana_backup_status", "HANA Backup",
|
||||||
|
$"{prefix} No successful backup found.",
|
||||||
|
isAlert: true, currentVal: "NO_BACKUP", ct);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
var ageHours = (int)(DateTime.UtcNow - lastBak.ToUniversalTime()).TotalHours;
|
||||||
|
if (ageHours > config.BackupThresholdHours)
|
||||||
|
await NotifyIfChanged("hana_backup_status", "HANA Backup",
|
||||||
|
$"{prefix} Last successful backup is {ageHours}h old (threshold: {config.BackupThresholdHours}h). Last backup: {lastBak:yyyy-MM-dd HH:mm}.",
|
||||||
|
isAlert: true, currentVal: $"{ageHours}h", ct);
|
||||||
|
else
|
||||||
|
await NotifyIfChanged("hana_backup_status", "HANA Backup",
|
||||||
|
$"{prefix} Backup age is {ageHours}h (OK).",
|
||||||
|
isAlert: false, currentVal: "OK", ct);
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.Success("Monitor check complete.");
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── Helpers ───────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
private async Task<ProcessResult> RunSql(
|
||||||
|
string hdbsql, string userKey, string sql, string sid,
|
||||||
|
bool scalar, CancellationToken ct)
|
||||||
|
{
|
||||||
|
var tmpFile = Path.Combine("/tmp", $"ht_{Guid.NewGuid():N}.sql");
|
||||||
|
await File.WriteAllTextAsync(tmpFile, sql, ct);
|
||||||
|
var flags = scalar ? $"-a -x" : string.Empty;
|
||||||
|
var result = await switcher.RunAsAsync(sid,
|
||||||
|
$"\"{hdbsql}\" -U {userKey} {flags} -I \"{tmpFile}\" 2>&1", ct);
|
||||||
|
File.Delete(tmpFile);
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
private async Task NotifyIfChanged(
|
||||||
|
string key, string titlePrefix, string message,
|
||||||
|
bool isAlert, string currentVal, CancellationToken ct)
|
||||||
|
{
|
||||||
|
var prev = state.GetState(key);
|
||||||
|
if (currentVal == prev) return; // No change — don't spam
|
||||||
|
|
||||||
|
string title;
|
||||||
|
if (isAlert)
|
||||||
|
title = $"{titlePrefix} Alert";
|
||||||
|
else if (!string.IsNullOrEmpty(prev) && prev != "OK")
|
||||||
|
title = $"{titlePrefix} Resolved";
|
||||||
|
else
|
||||||
|
{
|
||||||
|
state.SetState(key, currentVal);
|
||||||
|
return; // Transition OK→OK: update silently
|
||||||
|
}
|
||||||
|
|
||||||
|
await ntfy.SendAsync(title, message, ct);
|
||||||
|
state.SetState(key, currentVal);
|
||||||
|
logger.Info($"Notification sent: [{title}]");
|
||||||
|
}
|
||||||
|
}
|
||||||
39
Services/NtfyNotificationService.cs
Normal file
39
Services/NtfyNotificationService.cs
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
using HanaToolbox.Config;
|
||||||
|
using HanaToolbox.Logging;
|
||||||
|
using HanaToolbox.Services.Interfaces;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Services;
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Sends notifications to a ntfy server.
|
||||||
|
/// URL and token are read from <see cref="NtfyConfig"/> (set during onboarding).
|
||||||
|
/// Failures are silently swallowed — notifications must never crash the main flow.
|
||||||
|
/// </summary>
|
||||||
|
public sealed class NtfyNotificationService(NtfyConfig config, AppLogger logger)
    : INotificationService
{
    // Single shared HttpClient — per-request instances exhaust sockets.
    private static readonly HttpClient Http = new();

    /// <summary>
    /// Posts <paramref name="message"/> to the configured ntfy topic with
    /// <paramref name="title"/> as the "Title" header.
    /// No-op when URL or token is not configured; all failures (network errors
    /// and non-2xx responses) are logged and swallowed — notifications must
    /// never crash the main flow.
    /// </summary>
    public async Task SendAsync(string title, string message, CancellationToken ct = default)
    {
        if (string.IsNullOrWhiteSpace(config.Url) || string.IsNullOrWhiteSpace(config.Token))
        {
            logger.Info("Ntfy not configured — skipping notification.");
            return;
        }

        try
        {
            using var request = new HttpRequestMessage(HttpMethod.Post, config.Url);
            request.Headers.Add("Authorization", $"Bearer {config.Token}");
            request.Headers.Add("Title", title);
            request.Content = new StringContent(message);

            // BUG FIX: the response was previously ignored and leaked, so an
            // HTTP 4xx/5xx (bad token, wrong topic) was logged as a success.
            using var response = await Http.SendAsync(request, ct);
            if (response.IsSuccessStatusCode)
                logger.Info($"Notification sent: [{title}] {message}");
            else
                logger.Warning($"Failed to send ntfy notification: HTTP {(int)response.StatusCode}");
        }
        catch (Exception ex)
        {
            logger.Warning($"Failed to send ntfy notification: {ex.Message}");
        }
    }
}
|
||||||
43
Services/ProcessRunner.cs
Normal file
43
Services/ProcessRunner.cs
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
using System.Diagnostics;
|
||||||
|
using HanaToolbox.Logging;
|
||||||
|
using HanaToolbox.Services.Interfaces;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Services;
|
||||||
|
|
||||||
|
/// <summary>Runs external processes and captures stdout/stderr.</summary>
|
||||||
|
public sealed class ProcessRunner(AppLogger logger) : IProcessRunner
{
    /// <summary>
    /// Launches <paramref name="executable"/> with the given argument list,
    /// waits for it to exit, and returns the exit code plus trimmed stdout/stderr.
    /// Arguments are passed via ArgumentList, so no shell quoting is involved.
    /// </summary>
    public async Task<ProcessResult> RunAsync(
        string executable, string[] args,
        CancellationToken ct = default)
    {
        var startInfo = new ProcessStartInfo
        {
            FileName = executable,
            RedirectStandardOutput = true,
            RedirectStandardError = true,
            UseShellExecute = false,
            CreateNoWindow = true,
        };
        foreach (var argument in args)
            startInfo.ArgumentList.Add(argument);

        logger.Info($"Exec: {executable} {string.Join(' ', args)}");

        using var proc = new Process { StartInfo = startInfo };
        proc.Start();

        // Drain both pipes concurrently while waiting for exit; reading only
        // after exit could deadlock on a full pipe buffer.
        var outTask = proc.StandardOutput.ReadToEndAsync(ct);
        var errTask = proc.StandardError.ReadToEndAsync(ct);

        await proc.WaitForExitAsync(ct);

        var output = await outTask;
        var error = await errTask;

        logger.Info($"Exit code: {proc.ExitCode}");
        if (!string.IsNullOrWhiteSpace(error))
            logger.Info($"Stderr: {error.Trim()}");

        return new ProcessResult(proc.ExitCode, output.Trim(), error.Trim());
    }
}
|
||||||
74
Services/ServiceFactory.cs
Normal file
74
Services/ServiceFactory.cs
Normal file
@@ -0,0 +1,74 @@
|
|||||||
|
using HanaToolbox.Config;
|
||||||
|
using HanaToolbox.Logging;
|
||||||
|
using HanaToolbox.Scheduling;
|
||||||
|
using HanaToolbox.Services.Interfaces;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Services;
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Simple factory for wiring service dependencies without a DI container.
|
||||||
|
/// AOT-safe — no reflection, no generic activators.
|
||||||
|
/// </summary>
|
||||||
|
public static class ServiceFactory
{
    public static ProcessRunner CreateRunner(AppLogger log) => new(log);

    public static SuUserSwitcher CreateSwitcher(AppLogger log) => new(CreateRunner(log));

    public static HdbClientLocator CreateLocator(AppLogger log) => new(CreateRunner(log));

    public static NtfyNotificationService CreateNtfy(AppLogger log, NtfyConfig ntfy) =>
        new(ntfy, log);

    public static FileMonitorStateService CreateState(AppLogger log) => new(log);

    /// <summary>
    /// Builds the dependency set shared by every composite service: the loaded
    /// config, one ProcessRunner, and a switcher/locator/ntfy wired to that SAME
    /// runner instance. Extracted because the four factory methods below
    /// previously duplicated this wiring verbatim.
    /// </summary>
    private static (AppConfig Config, ProcessRunner Runner, SuUserSwitcher Switcher,
                    HdbClientLocator Locator, NtfyNotificationService Ntfy)
        CreateCore(AppLogger log)
    {
        var config = ConfigService.Load();
        var runner = CreateRunner(log);
        return (config,
                runner,
                new SuUserSwitcher(runner),
                new HdbClientLocator(runner),
                CreateNtfy(log, config.Ntfy));
    }

    public static BackupService CreateBackupService(AppLogger log)
    {
        var (_, _, switcher, locator, ntfy) = CreateCore(log);
        return new BackupService(switcher, locator, ntfy, log);
    }

    public static AuroraService CreateAuroraService(AppLogger log)
    {
        var (_, _, switcher, locator, ntfy) = CreateCore(log);
        return new AuroraService(switcher, locator, ntfy, log);
    }

    public static MonitorService CreateMonitorService(AppLogger log)
    {
        var (_, runner, switcher, locator, ntfy) = CreateCore(log);
        var state = CreateState(log);
        return new MonitorService(runner, switcher, locator, ntfy, state, log);
    }

    public static CronOrchestrator CreateCronOrchestrator(AppLogger log)
    {
        var (_, runner, switcher, locator, ntfy) = CreateCore(log);
        var state = CreateState(log);

        return new CronOrchestrator(
            monitor: new MonitorService(runner, switcher, locator, ntfy, state, log),
            backup: new BackupService(switcher, locator, ntfy, log),
            cleaner: new CleanerService(log),
            aurora: new AuroraService(switcher, locator, ntfy, log),
            firewall: new FirewallService(runner, log),
            stateService: state,
            logger: log);
    }
}
|
||||||
26
Services/SuUserSwitcher.cs
Normal file
26
Services/SuUserSwitcher.cs
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
using HanaToolbox.Services.Interfaces;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Services;
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Wraps a shell command to run as <sid>adm using `su - <sid>adm -c '...'`.
|
||||||
|
/// If the current OS user is already <sid>adm, the command is executed directly.
|
||||||
|
/// </summary>
|
||||||
|
public sealed class SuUserSwitcher(IProcessRunner runner) : IUserSwitcher
{
    /// <summary>
    /// Runs <paramref name="shellCommand"/> as the &lt;sid&gt;adm OS user.
    /// When the process already runs as that user the command is executed
    /// directly via bash; otherwise it is wrapped in a login `su -` so the
    /// target user's environment (HOME, PATH, ...) is inherited.
    /// </summary>
    public async Task<ProcessResult> RunAsAsync(
        string sid, string shellCommand,
        CancellationToken ct = default)
    {
        var targetUser = $"{sid.ToLowerInvariant()}adm";
        var alreadyTargetUser = string.Equals(
            Environment.UserName, targetUser, StringComparison.OrdinalIgnoreCase);

        return alreadyTargetUser
            ? await runner.RunAsync("/bin/bash", ["-c", shellCommand], ct)
            : await runner.RunAsync("/bin/su", ["-", targetUser, "-c", shellCommand], ct);
    }
}
|
||||||
137
Tui/CronSetupTui.cs
Normal file
137
Tui/CronSetupTui.cs
Normal file
@@ -0,0 +1,137 @@
|
|||||||
|
using HanaToolbox.Config;
|
||||||
|
using Spectre.Console;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Tui;
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// TUI wizard for configuring cron task schedules and settings.
|
||||||
|
/// Saves results back to the provided AppConfig (caller is responsible for persisting).
|
||||||
|
/// </summary>
|
||||||
|
public sealed class CronSetupTui
{
    /// <summary>
    /// Runs the full cron configuration wizard over <paramref name="current"/>,
    /// mutating each task's config section in place, and returns the same
    /// instance. Persisting the result is the caller's responsibility.
    /// </summary>
    public AppConfig Run(AppConfig current)
    {
        AnsiConsole.Clear();
        AnsiConsole.Write(new Rule("[cyan]Cron Task Configuration[/]").RuleStyle("cyan"));
        AnsiConsole.MarkupLine("[grey]Configure which tasks run automatically and when.[/]\n");

        // One section per scheduled task, in a fixed order.
        ConfigureBackup(current.Backup);
        ConfigureCleaner(current.Cleaner);
        ConfigureAurora(current.Aurora, current.Hana);
        ConfigureFirewall(current.Firewall);
        ConfigureMonitor(current.Monitor);

        return current;
    }

    /// <summary>Prompts for backup schedule, type, paths and SYSTEMDB options.</summary>
    private static void ConfigureBackup(BackupConfig b)
    {
        AnsiConsole.Write(new Rule("[green]Backup[/]").RuleStyle("green"));
        b.Enabled = AnsiConsole.Confirm("Enable scheduled backup?", b.Enabled);
        // Disabled task: skip all remaining prompts for this section.
        if (!b.Enabled) { AnsiConsole.WriteLine(); return; }

        // NOTE(review): hour/minute are not range-validated here — presumably
        // validated downstream when the cron entry is built; confirm.
        b.ScheduleHour = AnsiConsole.Prompt(new TextPrompt<int>("Run hour (0-23):").DefaultValue(b.ScheduleHour));
        b.ScheduleMinute = AnsiConsole.Prompt(new TextPrompt<int>("Run minute (0-59):").DefaultValue(b.ScheduleMinute))
;
        // Choice strings must match the BackupType enum member names exactly,
        // since the selection is fed straight into Enum.Parse below.
        var typeStr = AnsiConsole.Prompt(
            new SelectionPrompt<string>()
                .Title("Backup type:")
                .AddChoices("All", "Tenant", "Schema")
                .HighlightStyle("cyan"));
        b.Type = Enum.Parse<BackupType>(typeStr);
        b.UserKey = AnsiConsole.Prompt(new TextPrompt<string>("hdbuserstore key:").DefaultValue(b.UserKey));
        b.BackupBasePath = AnsiConsole.Prompt(new TextPrompt<string>("Tenant backup path:").DefaultValue(b.BackupBasePath));
        b.Compress = AnsiConsole.Confirm("Compress tenant backup?", b.Compress);

        // Schema-level settings apply to both "Schema" and "All" backup types.
        if (b.Type is BackupType.Schema or BackupType.All)
        {
            b.SchemaBackupPath = AnsiConsole.Prompt(new TextPrompt<string>("Schema backup path:").DefaultValue(b.SchemaBackupPath));
            b.CompressSchema = AnsiConsole.Confirm("Compress schema backup?", b.CompressSchema);
            var schemas = AnsiConsole.Prompt(new TextPrompt<string>("Schema names (comma-separated):")
                .DefaultValue(string.Join(",", b.SchemaNames)));
            b.SchemaNames = schemas.Split(',', StringSplitOptions.RemoveEmptyEntries)
                .Select(s => s.Trim()).ToList();
        }

        b.BackupSystemDb = AnsiConsole.Confirm("Also backup SYSTEMDB?", b.BackupSystemDb);
        if (b.BackupSystemDb)
            b.SystemDbUserKey = AnsiConsole.Prompt(
                new TextPrompt<string>("SYSTEMDB hdbuserstore key:").DefaultValue(b.SystemDbUserKey));

        AnsiConsole.WriteLine();
    }

    /// <summary>Prompts for cleanup schedule, retention windows and log backup paths.</summary>
    private static void ConfigureCleaner(CleanerConfig c)
    {
        AnsiConsole.Write(new Rule("[green]Cleaner[/]").RuleStyle("green"));
        c.Enabled = AnsiConsole.Confirm("Enable scheduled cleanup?", c.Enabled);
        if (!c.Enabled) { AnsiConsole.WriteLine(); return; }

        c.ScheduleHour = AnsiConsole.Prompt(new TextPrompt<int>("Run hour (0-23):").DefaultValue(c.ScheduleHour));
        c.ScheduleMinute = AnsiConsole.Prompt(new TextPrompt<int>("Run minute (0-59):").DefaultValue(c.ScheduleMinute));
        c.TenantBackupPath = AnsiConsole.Prompt(new TextPrompt<string>("Tenant backup path:").DefaultValue(c.TenantBackupPath));
        c.TenantRetentionDays = AnsiConsole.Prompt(new TextPrompt<int>("Tenant retention (days):").DefaultValue(c.TenantRetentionDays));
        c.LogRetentionDays = AnsiConsole.Prompt(new TextPrompt<int>("Log backup retention (days):").DefaultValue(c.LogRetentionDays));

        AnsiConsole.MarkupLine("[grey]Current log backup paths:[/]");
        c.LogBackupPaths.ForEach(p => AnsiConsole.MarkupLine($" [grey]- {p}[/]"));

        // Default to "yes" only when the list is still empty, so a fresh setup
        // nudges the user to add at least one path.
        while (AnsiConsole.Confirm("Add a log backup path?", defaultValue: c.LogBackupPaths.Count == 0))
        {
            var p = AnsiConsole.Prompt(new TextPrompt<string>("Log backup path:"));
            if (!string.IsNullOrWhiteSpace(p)) c.LogBackupPaths.Add(p);
        }

        AnsiConsole.WriteLine();
    }

    /// <summary>Prompts for the Aurora schema-refresh schedule and connection settings.</summary>
    private static void ConfigureAurora(AuroraConfig a, HanaConfig hana)
    {
        // NOTE(review): the hana parameter is currently unused here — presumably
        // kept for future defaults; confirm before removing.
        AnsiConsole.Write(new Rule("[green]Aurora[/]").RuleStyle("green"));
        a.Enabled = AnsiConsole.Confirm("Enable Aurora schema refresh?", a.Enabled);
        if (!a.Enabled) { AnsiConsole.WriteLine(); return; }

        a.ScheduleHour = AnsiConsole.Prompt(new TextPrompt<int>("Run hour (0-23):").DefaultValue(a.ScheduleHour));
        a.ScheduleMinute = AnsiConsole.Prompt(new TextPrompt<int>("Run minute (0-59):").DefaultValue(a.ScheduleMinute));
        a.AdminUserKey = AnsiConsole.Prompt(new TextPrompt<string>("Admin hdbuserstore key:").DefaultValue(a.AdminUserKey));
        a.SourceSchema = AnsiConsole.Prompt(new TextPrompt<string>("Source schema name:").DefaultValue(a.SourceSchema));
        a.AuroraUser = AnsiConsole.Prompt(new TextPrompt<string>("Aurora target user:").DefaultValue(a.AuroraUser));
        a.BackupBasePath = AnsiConsole.Prompt(new TextPrompt<string>("Temp export base path:").DefaultValue(a.BackupBasePath));
        AnsiConsole.WriteLine();
    }

    /// <summary>Prompts for the firewall rule-application schedule only; the rules
    /// themselves are configured via the dedicated firewall command.</summary>
    private static void ConfigureFirewall(FirewallConfig f)
    {
        AnsiConsole.Write(new Rule("[green]Firewall[/]").RuleStyle("green"));
        f.Enabled = AnsiConsole.Confirm("Enable scheduled firewall rule application?", f.Enabled);
        if (!f.Enabled) { AnsiConsole.WriteLine(); return; }

        f.ScheduleHour = AnsiConsole.Prompt(new TextPrompt<int>("Run hour (0-23):").DefaultValue(f.ScheduleHour));
        f.ScheduleMinute = AnsiConsole.Prompt(new TextPrompt<int>("Run minute (0-59):").DefaultValue(f.ScheduleMinute));
        AnsiConsole.MarkupLine("[grey]Note: Firewall rules are configured via [cyan]hanatoolbox firewall[/][/]");
        AnsiConsole.WriteLine();
    }

    /// <summary>Prompts for monitor thresholds and the directory watch list.
    /// The monitor has no schedule of its own — it runs on every cron tick.</summary>
    private static void ConfigureMonitor(MonitorConfig m)
    {
        AnsiConsole.Write(new Rule("[green]Monitor[/]").RuleStyle("green"));
        m.Enabled = AnsiConsole.Confirm("Enable monitor (runs every cron tick)?", m.Enabled);
        if (!m.Enabled) { AnsiConsole.WriteLine(); return; }

        m.HanaUserKey = AnsiConsole.Prompt(new TextPrompt<string>("Monitor hdbuserstore key:").DefaultValue(m.HanaUserKey));
        m.CompanyName = AnsiConsole.Prompt(new TextPrompt<string>("Company name (for alerts):").DefaultValue(m.CompanyName));
        m.SapcontrolPath = AnsiConsole.Prompt(new TextPrompt<string>("sapcontrol path:").DefaultValue(m.SapcontrolPath));
        m.DiskUsageThresholdPercent = AnsiConsole.Prompt(new TextPrompt<int>("Disk usage alert threshold (%):").DefaultValue(m.DiskUsageThresholdPercent));
        m.BackupThresholdHours = AnsiConsole.Prompt(new TextPrompt<int>("Max backup age (hours):").DefaultValue(m.BackupThresholdHours));

        AnsiConsole.MarkupLine("[grey]Current monitored directories:[/]");
        m.DirectoriesToMonitor.ForEach(d => AnsiConsole.MarkupLine($" [grey]- {d}[/]"));
        while (AnsiConsole.Confirm("Add a directory to monitor?", defaultValue: false))
        {
            var d = AnsiConsole.Prompt(new TextPrompt<string>("Directory path:"));
            if (!string.IsNullOrWhiteSpace(d)) m.DirectoriesToMonitor.Add(d);
        }

        AnsiConsole.WriteLine();
    }
}
|
||||||
149
Tui/FirewallTui.cs
Normal file
149
Tui/FirewallTui.cs
Normal file
@@ -0,0 +1,149 @@
|
|||||||
|
using HanaToolbox.Config;
|
||||||
|
using HanaToolbox.Logging;
|
||||||
|
using HanaToolbox.Services;
|
||||||
|
using HanaToolbox.Services.Interfaces;
|
||||||
|
using Spectre.Console;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Tui;
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Interactive Firewall TUI using Spectre.Console.
|
||||||
|
/// Mirrors the firewalld.sh interactive flow.
|
||||||
|
/// </summary>
|
||||||
|
public sealed class FirewallTui(
    FirewallService firewallService,
    AppLogger _logger)
{
    /// <summary>
    /// Interactive firewall configuration flow: asks per-service access rules,
    /// shows a summary, applies the rules, and gives the operator a 15-second
    /// window to confirm connectivity before auto-reverting.
    /// </summary>
    /// <returns>The updated config to persist, or null when aborted/reverted.</returns>
    public async Task<FirewallConfig?> RunAsync(
        FirewallConfig current, CancellationToken ct = default)
    {
        AnsiConsole.Clear();
        AnsiConsole.Write(new Rule("[cyan]SAP B1 Firewall Configurator[/]").RuleStyle("cyan"));
        AnsiConsole.WriteLine();

        // Flush question
        var flush = AnsiConsole.Confirm("Flush (remove) all current firewall rules before applying? [creates clean slate]",
            defaultValue: current.FlushBeforeApply);

        AnsiConsole.WriteLine();

        // Per-service configuration
        var entries = new List<FirewallServiceEntry>();
        foreach (var svc in current.Services)
        {
            AnsiConsole.Write(new Rule($"[green]{svc.Name}[/] [grey]({string.Join(", ", svc.Ports)})[/]").RuleStyle("grey"));

            var choice = AnsiConsole.Prompt(
                new SelectionPrompt<string>()
                    .Title("Access rule:")
                    .AddChoices("Allow from ANYWHERE (Public)", "Restrict to SPECIFIC IPs", "Skip / Block")
                    .HighlightStyle("cyan"));

            var decision = choice switch
            {
                "Allow from ANYWHERE (Public)" => FirewallDecision.All,
                "Restrict to SPECIFIC IPs" => FirewallDecision.Ip,
                _ => FirewallDecision.Skip
            };

            var ips = new List<string>();
            if (decision == FirewallDecision.Ip)
            {
                var existing = string.Join(", ", svc.AllowedIps);
                var ipInput = AnsiConsole.Prompt(
                    new TextPrompt<string>("Enter IPs or subnets (comma-separated):")
                        .DefaultValue(existing.Length > 0 ? existing : "192.168.1.1")
                        .AllowEmpty());

                ips = ipInput
                    .Split(',', StringSplitOptions.RemoveEmptyEntries)
                    .Select(i => i.Trim())
                    .Where(i => i.Length > 0)
                    .ToList();
            }

            entries.Add(new FirewallServiceEntry
            {
                Name = svc.Name,
                Ports = svc.Ports,
                Decision = decision,
                AllowedIps = ips
            });

            AnsiConsole.WriteLine();
        }

        // Summary table
        AnsiConsole.Write(new Rule("[yellow]Summary[/]").RuleStyle("yellow"));
        var table = new Table()
            .AddColumn("Service")
            .AddColumn("Action")
            .AddColumn("Details");

        foreach (var e in entries)
        {
            var (action, details) = e.Decision switch
            {
                FirewallDecision.All => ("[red]Open Public[/]", "0.0.0.0/0"),
                FirewallDecision.Ip => ("[green]Restricted[/]", string.Join(", ", e.AllowedIps)),
                _ => ("[grey]Blocked/Skip[/]", "-")
            };
            // Truncate long service names so the table stays on one screen width.
            table.AddRow(e.Name.Length > 35 ? e.Name[..35] : e.Name, action, details);
        }
        AnsiConsole.Write(table);
        AnsiConsole.WriteLine();

        if (!AnsiConsole.Confirm("Apply and save these rules?", defaultValue: true))
        {
            AnsiConsole.MarkupLine("[yellow]Aborted.[/]");
            return null;
        }

        var updated = new FirewallConfig
        {
            FlushBeforeApply = flush,
            Services = entries,
            Enabled = current.Enabled,
            ScheduleHour = current.ScheduleHour,
            ScheduleMinute = current.ScheduleMinute
        };

        // Apply rules
        await firewallService.ApplyAsync(updated, ct);

        // Safety revert window: if the operator locked themselves out they
        // cannot press ENTER, and the runtime rules are reverted.
        AnsiConsole.MarkupLine("[yellow]Rules applied. You have 15 seconds to confirm your connection still works.[/]");
        AnsiConsole.MarkupLine("Press [cyan]ENTER[/] to keep changes permanently, or wait to auto-revert.");

        var confirmed = await WaitForConfirmAsync(15, ct);
        if (!confirmed)
        {
            AnsiConsole.MarkupLine("[red]Timeout — reverting firewall to permanent config...[/]");
            await AnsiConsole.Status().StartAsync("Reverting...", async _ =>
            {
                await Task.Run(() =>
                {
                    // BUG FIX: the old string overload passed "-c firewall-cmd --reload"
                    // as whitespace-split args, so bash ran `firewall-cmd` with
                    // $0="--reload" and the reload flag was never applied.
                    // ArgumentList hands bash the full command string intact.
                    var psi = new System.Diagnostics.ProcessStartInfo("/bin/bash")
                    {
                        ArgumentList = { "-c", "firewall-cmd --reload" }
                    };
                    System.Diagnostics.Process.Start(psi)?.WaitForExit();
                }, ct);
            });
            return null;
        }

        AnsiConsole.MarkupLine("[green]Changes confirmed and saved permanently.[/]");
        return updated;
    }

    /// <summary>
    /// Waits up to <paramref name="seconds"/> for the operator to press ENTER.
    /// Returns true when confirmed, false on timeout or cancellation.
    /// </summary>
    private static async Task<bool> WaitForConfirmAsync(int seconds, CancellationToken ct)
    {
        // BUG FIX: the previous version awaited Task.Run(Console.ReadLine, token)
        // alone; a CancellationToken cannot interrupt an already-running ReadLine,
        // so the timeout never fired and the method blocked until ENTER.
        // Racing the read against Task.Delay makes the timeout actually work.
        // (The orphaned ReadLine task keeps running — console reads are not
        // cancellable — but it is harmless here.)
        using var cts = CancellationTokenSource.CreateLinkedTokenSource(ct);

        var readTask = Task.Run(Console.ReadLine, cts.Token);
        var timeoutTask = Task.Delay(TimeSpan.FromSeconds(seconds), cts.Token);

        var winner = await Task.WhenAny(readTask, timeoutTask);
        return winner == readTask;
    }
}
|
||||||
134
Tui/KeyManagerTui.cs
Normal file
134
Tui/KeyManagerTui.cs
Normal file
@@ -0,0 +1,134 @@
|
|||||||
|
using HanaToolbox.Config;
|
||||||
|
using HanaToolbox.Logging;
|
||||||
|
using HanaToolbox.Services;
|
||||||
|
using HanaToolbox.Services.Interfaces;
|
||||||
|
using Spectre.Console;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Tui;
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Interactive TUI for managing hdbuserstore keys.
|
||||||
|
/// Mirrors keymanager.sh flow: Create / Delete / Test.
|
||||||
|
/// </summary>
|
||||||
|
public sealed class KeyManagerTui(
    KeyManagerService keyService,
    IHdbClientLocator locator,
    AppLogger _logger)
{
    /// <summary>
    /// Main menu loop: Create / Delete / Test hdbuserstore keys for the given SID.
    /// Loops until the user picks "Exit".
    /// </summary>
    public async Task RunAsync(HanaConfig hana, string sid, CancellationToken ct = default)
    {
        // Resolve hdbsql once up front; all key tests reuse this path.
        var hdbsql = locator.LocateHdbsql(hana.HdbsqlPath, sid, hana.InstanceNumber);

        while (true)
        {
            AnsiConsole.Clear();
            AnsiConsole.Write(new Rule("[blue]SAP HANA Secure User Store Key Manager[/]").RuleStyle("blue"));
            AnsiConsole.WriteLine();

            var choice = AnsiConsole.Prompt(
                new SelectionPrompt<string>()
                    .Title("Select an action:")
                    .AddChoices("Create a New Key", "Delete an Existing Key",
                        "Test an Existing Key", "Exit")
                    .HighlightStyle("cyan"));

            switch (choice)
            {
                case "Create a New Key":
                    await CreateKeyAsync(hdbsql, hana, sid, ct);
                    break;
                case "Delete an Existing Key":
                    await DeleteKeyAsync(sid, ct);
                    break;
                case "Test an Existing Key":
                    await TestKeyAsync(hdbsql, sid, ct);
                    break;
                case "Exit":
                    return;
            }

            // Pause so the user can read the action's output before the clear.
            AnsiConsole.MarkupLine("\n[grey]Press any key to continue...[/]");
            Console.ReadKey(intercept: true);
        }
    }

    /// <summary>
    /// Prompts for connection details, previews the hdbuserstore command,
    /// creates the key, then immediately tests it — rolling back (deleting
    /// the key) if the connection test fails.
    /// </summary>
    private async Task CreateKeyAsync(
        string hdbsql, HanaConfig hana, string sid, CancellationToken ct)
    {
        AnsiConsole.Write(new Rule("[blue]Create New Key[/]").RuleStyle("blue"));

        var keyName = AnsiConsole.Prompt(new TextPrompt<string>("Key name:").DefaultValue("CRONKEY"));
        var host = AnsiConsole.Prompt(new TextPrompt<string>("HANA host:").DefaultValue(System.Net.Dns.GetHostName()));
        var instance = AnsiConsole.Prompt(new TextPrompt<string>("Instance number:").DefaultValue(hana.InstanceNumber));
        var isSystemDb = AnsiConsole.Confirm("Connecting to SYSTEMDB?", defaultValue: false);

        // Port convention: 3<inst>13 is the SYSTEMDB SQL port, 3<inst>15 the
        // tenant SQL port (tenant name appended after '@').
        string connStr;
        if (isSystemDb)
        {
            connStr = $"{host}:3{instance}13";
        }
        else
        {
            var tenant = AnsiConsole.Prompt(new TextPrompt<string>("Tenant DB name:").DefaultValue(sid.ToUpperInvariant()));
            connStr = $"{host}:3{instance}15@{tenant}";
        }

        var user = AnsiConsole.Prompt(new TextPrompt<string>("Database user:").DefaultValue("SYSTEM"));
        var pass = AnsiConsole.Prompt(new TextPrompt<string>("Password:").Secret());

        // Preview deliberately masks the password.
        AnsiConsole.MarkupLine($"\n[yellow]Command preview:[/] hdbuserstore SET \"{keyName}\" \"{connStr}\" \"{user}\" <password>");
        if (!AnsiConsole.Confirm("Execute?", defaultValue: true)) return;

        var created = await keyService.CreateKeyAsync(keyName, connStr, user, pass, sid, ct);
        if (!created) return;

        // Auto-test and rollback on failure
        var ok = await keyService.TestKeyAsync(hdbsql, keyName, sid, ct);
        if (!ok)
        {
            AnsiConsole.MarkupLine("[yellow]Rolling back: deleting key due to connection failure...[/]");
            await keyService.DeleteKeyAsync(keyName, sid, ct);
        }
    }

    /// <summary>Lists existing keys and deletes the selected one after an
    /// explicit confirmation (default answer is "no").</summary>
    private async Task DeleteKeyAsync(string sid, CancellationToken ct)
    {
        AnsiConsole.Write(new Rule("[red]Delete Key[/]").RuleStyle("red"));

        var keys = await keyService.ListKeysAsync(sid, ct);
        if (keys.Count == 0)
        {
            AnsiConsole.MarkupLine("[yellow]No keys found.[/]");
            return;
        }

        var key = AnsiConsole.Prompt(
            new SelectionPrompt<string>()
                .Title("Select key to delete:")
                .AddChoices(keys)
                .HighlightStyle("red"));

        if (!AnsiConsole.Confirm($"Permanently delete '{key}'?", defaultValue: false)) return;
        await keyService.DeleteKeyAsync(key, sid, ct);
    }

    /// <summary>Lists existing keys and runs a connection test against the
    /// selected one (result reporting is handled by the key service).</summary>
    private async Task TestKeyAsync(string hdbsql, string sid, CancellationToken ct)
    {
        AnsiConsole.Write(new Rule("[blue]Test Key[/]").RuleStyle("blue"));

        var keys = await keyService.ListKeysAsync(sid, ct);
        if (keys.Count == 0)
        {
            AnsiConsole.MarkupLine("[yellow]No keys found.[/]");
            return;
        }

        var key = AnsiConsole.Prompt(
            new SelectionPrompt<string>()
                .Title("Select key to test:")
                .AddChoices(keys)
                .HighlightStyle("cyan"));

        await keyService.TestKeyAsync(hdbsql, key, sid, ct);
    }
}
|
||||||
112
Tui/OnboardTui.cs
Normal file
112
Tui/OnboardTui.cs
Normal file
@@ -0,0 +1,112 @@
|
|||||||
|
using HanaToolbox.Config;
|
||||||
|
using HanaToolbox.Logging;
|
||||||
|
using HanaToolbox.Services;
|
||||||
|
using HanaToolbox.Services.Interfaces;
|
||||||
|
using Spectre.Console;
|
||||||
|
|
||||||
|
namespace HanaToolbox.Tui;
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Full guided onboarding wizard.
|
||||||
|
/// Walks through every setting, creates hdbuserstore keys, and writes hanatoolbox.json.
|
||||||
|
/// </summary>
|
||||||
|
public sealed class OnboardTui(
    KeyManagerTui keyManagerTui,
    CronSetupTui cronSetupTui,
    FirewallTui firewallTui,
    AppLogger _logger)  // NOTE(review): _logger is never used in this class — confirm whether it can be removed.
{
    /// <summary>
    /// Runs the full 7-step onboarding wizard: HANA settings, hdbuserstore keys,
    /// cron tasks, firewall, binary-path overrides, ntfy notifications, and a
    /// final save to /etc/hanatoolbox/hanatoolbox.json. Steps run strictly in
    /// order; aborting at the final confirmation discards everything.
    /// </summary>
    /// <param name="ct">Cancellation token flowed into the sub-TUIs.</param>
    public async Task RunAsync(CancellationToken ct = default)
    {
        AnsiConsole.Clear();
        AnsiConsole.Write(new FigletText("HanaToolbox").Color(Color.Cyan1));
        AnsiConsole.Write(new Rule("[cyan]Initial Setup Wizard[/]").RuleStyle("cyan"));
        AnsiConsole.MarkupLine("[grey]This wizard will configure HanaToolbox for this system.[/]\n");

        // Root check — the wizard can proceed without root, but warns first.
        if (Environment.UserName != "root")
        {
            AnsiConsole.MarkupLine("[red]Warning: Not running as root. Some operations may fail.[/]");
            if (!AnsiConsole.Confirm("Continue anyway?", defaultValue: false)) return;
        }

        // Re-running the wizard starts from the existing config so current
        // values become the prompt defaults.
        var config = ConfigService.Exists() ? ConfigService.Load() : new AppConfig();

        // Step 1: HANA global settings
        AnsiConsole.Write(new Rule("[blue]Step 1 of 7 — HANA Settings[/]").RuleStyle("blue"));
        config.Hana.Sid = AnsiConsole.Prompt(new TextPrompt<string>("HANA SID:").DefaultValue(config.Hana.Sid));
        config.Hana.InstanceNumber = AnsiConsole.Prompt(new TextPrompt<string>("Instance number:").DefaultValue(config.Hana.InstanceNumber));
        AnsiConsole.WriteLine();

        // Step 2: hdbuserstore keys
        AnsiConsole.Write(new Rule("[blue]Step 2 of 7 — Key Manager[/]").RuleStyle("blue"));
        AnsiConsole.MarkupLine("[grey]Set up hdbuserstore keys needed for automated operations.[/]");
        if (AnsiConsole.Confirm("Open Key Manager now?", defaultValue: true))
            await keyManagerTui.RunAsync(config.Hana, config.Hana.Sid, ct);
        AnsiConsole.WriteLine();

        // Step 3: Cron tasks
        AnsiConsole.Write(new Rule("[blue]Step 3 of 7 — Cron Task Settings[/]").RuleStyle("blue"));
        config = cronSetupTui.Run(config);

        // Step 4: Firewall
        AnsiConsole.Write(new Rule("[blue]Step 4 of 7 — Firewall[/]").RuleStyle("blue"));
        if (AnsiConsole.Confirm("Configure firewall rules now?", defaultValue: true))
        {
            // Null result means the firewall TUI was aborted — keep old settings.
            var updated = await firewallTui.RunAsync(config.Firewall, ct);
            if (updated != null) config.Firewall = updated;
        }
        AnsiConsole.WriteLine();

        // Step 5: Binary paths (optional overrides)
        AnsiConsole.Write(new Rule("[blue]Step 5 of 7 — Binary Paths (optional)[/]").RuleStyle("blue"));
        AnsiConsole.MarkupLine("[grey]Leave empty to use auto-detection (which, /usr/sap/hdbclient, etc.)[/]");
        var hdbsqlOverride = AnsiConsole.Prompt(
            new TextPrompt<string>("hdbsql path override (empty = auto):").AllowEmpty().DefaultValue(""));
        var hdbusOverride = AnsiConsole.Prompt(
            new TextPrompt<string>("hdbuserstore path override (empty = auto):").AllowEmpty().DefaultValue(""));
        // Empty input leaves any previously configured override untouched.
        if (!string.IsNullOrWhiteSpace(hdbsqlOverride)) config.Hana.HdbsqlPath = hdbsqlOverride;
        if (!string.IsNullOrWhiteSpace(hdbusOverride)) config.Hana.HdbuserstorePath = hdbusOverride;
        AnsiConsole.WriteLine();

        // Step 6: ntfy notifications
        AnsiConsole.Write(new Rule("[blue]Step 6 of 7 — Notifications (ntfy)[/]").RuleStyle("blue"));
        AnsiConsole.MarkupLine("[grey]HanaToolbox sends alerts via ntfy.sh (or a self-hosted ntfy server).[/]");
        AnsiConsole.MarkupLine("[grey]Leave the token empty to disable notifications.[/]");
        AnsiConsole.WriteLine();

        config.Ntfy.Url = AnsiConsole.Prompt(
            new TextPrompt<string>("ntfy server URL (topic included):")
                .DefaultValue(config.Ntfy.Url));

        // The secret prompt shows "(existing)" as a placeholder default so the
        // real token is never echoed to the terminal.
        config.Ntfy.Token = AnsiConsole.Prompt(
            new TextPrompt<string>("ntfy access token (empty = no auth):")
                .Secret()
                .AllowEmpty()
                .DefaultValue(string.IsNullOrWhiteSpace(config.Ntfy.Token) ? string.Empty : "(existing)"));

        // If user accepted "(existing)" prompt without typing, keep the real token
        if (config.Ntfy.Token == "(existing)")
            config.Ntfy.Token = ConfigService.Exists() ? ConfigService.Load().Ntfy.Token : string.Empty;

        AnsiConsole.WriteLine();

        // Step 7: Save
        AnsiConsole.Write(new Rule("[blue]Step 7 of 7 — Finalize[/]").RuleStyle("blue"));

        var crontabLine = $"* * * * * root /usr/local/bin/hanatoolbox cron";
        AnsiConsole.MarkupLine("[grey]Add the following line to your system crontab ([cyan]/etc/crontab[/] or [cyan]/etc/cron.d/hanatoolbox[/]):[/]");
        AnsiConsole.MarkupLine($"[cyan]{crontabLine}[/]");
        AnsiConsole.WriteLine();

        if (!AnsiConsole.Confirm("Save configuration to /etc/hanatoolbox/hanatoolbox.json?", defaultValue: true))
        {
            AnsiConsole.MarkupLine("[yellow]Aborted. No changes saved.[/]");
            return;
        }

        ConfigService.Save(config);
        AnsiConsole.MarkupLine("[green]✅ Configuration saved successfully![/]");
        AnsiConsole.MarkupLine($"[grey]Config file: /etc/hanatoolbox/hanatoolbox.json[/]");
    }
}
|
||||||
114
templates/aurora.sh
Normal file
114
templates/aurora.sh
Normal file
@@ -0,0 +1,114 @@
|
|||||||
|
#!/bin/bash
# Version: 2.5.1
# Author: Tomi Eckert
# ==============================================================================
# Aurora Refresh Script
#
# Performs an automated refresh of a SAP HANA schema using hanatool.sh.
# It exports a production schema and re-imports it under a new name ("Aurora")
# to create an up-to-date, non-production environment for testing.
#
# Expects aurora.conf (same directory) to define: SOURCE_SCHEMA,
# BACKUP_BASE_DIR, DB_ADMIN_KEY, HDBSQL, THREADS, AURORA_USER, and optionally
# POST_IMPORT_SQL / SQL_SCRIPTS_ROOT.  # TODO confirm full variable list.
# ==============================================================================

# --- Configuration and Setup ---

# Find the script's own directory to locate the config file and hanatool.sh
SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &> /dev/null && pwd)
CONFIG_FILE="${SCRIPT_DIR}/aurora.conf"
HANATOOL_PATH="${SCRIPT_DIR}/hanatool.sh"

# Check for config file and source it
if [[ -f "$CONFIG_FILE" ]]; then
    source "$CONFIG_FILE"
else
    echo "❌ Error: Configuration file not found at '${CONFIG_FILE}'"
    exit 1
fi

# Check if hanatool.sh executable exists
if [[ ! -x "$HANATOOL_PATH" ]]; then
    echo "❌ Error: hanatool.sh not found or not executable at '${HANATOOL_PATH}'"
    exit 1
fi

# --- Derived Variables ---
AURORA_SCHEMA="${SOURCE_SCHEMA}_AURORA"
EXPORT_DIR="${BACKUP_BASE_DIR}/${AURORA_SCHEMA}_TEMP_EXPORT"

# --- Main Execution ---
echo "🚀 Starting Aurora Refresh for '${SOURCE_SCHEMA}' using hanatool.sh..."

# 1. Drop the old Aurora schema if it exists.
echo "🗑️ Dropping old schema '${AURORA_SCHEMA}' (if it exists)..."
"$HDBSQL" -U "$DB_ADMIN_KEY" "DROP SCHEMA \"${AURORA_SCHEMA}\" CASCADE" >/dev/null 2>&1 || echo "  -> Schema did not exist. Continuing."

# 2. Prepare the temporary export directory.
echo "📁 Preparing temporary export directory..."
rm -rf "$EXPORT_DIR"
mkdir -p "$EXPORT_DIR"

# 3. Export the source schema using hanatool.sh
# (if ! cmd — checking the command directly is safer than testing $? afterwards,
# which silently breaks if another statement is ever inserted in between.)
echo "⬇️ Exporting source schema '${SOURCE_SCHEMA}'..."
if ! "$HANATOOL_PATH" "$DB_ADMIN_KEY" export "$SOURCE_SCHEMA" "$EXPORT_DIR" -t "$THREADS"; then
    echo "❌ Error: Export failed."
    exit 1
fi

# 4. Import the data into the new Aurora schema using hanatool.sh
echo "⬆️ Importing data and renaming schema to '${AURORA_SCHEMA}'..."
if ! "$HANATOOL_PATH" "$DB_ADMIN_KEY" import-rename "$SOURCE_SCHEMA" "$AURORA_SCHEMA" "$EXPORT_DIR" -t "$THREADS"; then
    echo "❌ Error: Import failed."
    exit 1
fi

# 5. Update company name in CINF and OADM tables.
echo "✍️ Updating company name fields in the new schema..."

# First, get the original company name from the source schema.
echo "  -> Fetching original company name from '${SOURCE_SCHEMA}'..."
ORIGINAL_COMPNY_NAME=$("$HDBSQL" -U "$DB_ADMIN_KEY" "SELECT \"CompnyName\" FROM \"${SOURCE_SCHEMA}\".\"CINF\"" | sed -n '2p' | tr -d '"' | xargs)

# Construct the new name in the desired format.
DATE_STAMP=$(date "+%Y-%m-%d")
NEW_COMPNY_NAME="AURORA - ${ORIGINAL_COMPNY_NAME} - ${DATE_STAMP}"
echo "  -> New company name set to: '${NEW_COMPNY_NAME}'"

# SECURITY NOTE: NEW_COMPNY_NAME is interpolated into a SQL string literal; a
# company name containing a single quote would break (or alter) the statement.
# hdbsql has no bind-parameter CLI flag, so at minimum the value should be
# quote-escaped before use — flagged for follow-up.
echo "  -> Updating CINF table..."
"$HDBSQL" -U "$DB_ADMIN_KEY" "UPDATE \"${AURORA_SCHEMA}\".CINF SET \"CompnyName\" = '${NEW_COMPNY_NAME}';" >/dev/null

echo "  -> Updating OADM table..."
"$HDBSQL" -U "$DB_ADMIN_KEY" "UPDATE \"${AURORA_SCHEMA}\".OADM SET \"CompnyName\" = '${NEW_COMPNY_NAME}', \"PrintHeadr\" = '${NEW_COMPNY_NAME}';" >/dev/null
echo "  -> Company info updated."

# 6. Grant privileges to the read/write user.
echo "🔑 Granting ALL privileges on '${AURORA_SCHEMA}' to '${AURORA_USER}'..."
"$HDBSQL" -U "$DB_ADMIN_KEY" "GRANT ALL PRIVILEGES ON SCHEMA \"${AURORA_SCHEMA}\" TO \"${AURORA_USER}\";" >/dev/null
echo "  -> Privileges granted."

# 7. Run post-import SQL scripts, if any are defined.
if [[ -n "$POST_IMPORT_SQL" ]]; then
    echo "⚙️ Running post-import SQL scripts..."
    for sql_file in $POST_IMPORT_SQL; do
        full_path="${SQL_SCRIPTS_ROOT}/${sql_file}"
        if [[ -f "$full_path" ]]; then
            echo "  -> Executing: ${sql_file}"
            "$HDBSQL" -U "$DB_ADMIN_KEY" -I "$full_path"
        else
            echo "  -> ⚠️ WARNING: Script not found: ${full_path}" >&2
        fi
    done
else
    echo "ℹ️ No post-import SQL scripts to run."
fi

# 8. Clean up the temporary export files.
echo "🧹 Cleaning up temporary directory '${EXPORT_DIR}'..."
rm -rf "$EXPORT_DIR"
echo "  -> Cleanup complete."

echo "--------------------------------------------------------"
echo "✅ Aurora Refresh finished successfully!"
echo

exit 0
|
||||||
133
templates/backup.sh
Normal file
133
templates/backup.sh
Normal file
@@ -0,0 +1,133 @@
|
|||||||
|
#!/bin/bash
# Version: 1.1.0
# Author: Tomi Eckert
# ==============================================================================
# SAP HANA Backup Script
#
# Performs schema exports for one or more schemas and/or tenant backups for a
# SAP HANA database using hanatool.sh. Designed to be executed via a cronjob.
# Reads all settings from the backup.conf file in the same directory.
# ==============================================================================

# --- Configuration and Setup ---

# Find the script's own directory to locate the config file and hanatool.sh
SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &> /dev/null && pwd)
CONFIG_FILE="${SCRIPT_DIR}/backup.conf"
HANATOOL_PATH="${SCRIPT_DIR}/hanatool.sh" # hanatool.sh lives next to this script

# Check for config file and source it
if [[ -f "$CONFIG_FILE" ]]; then
    source "$CONFIG_FILE"
else
    echo "❌ Error: Configuration file not found at '${CONFIG_FILE}'"
    exit 1
fi

# Check if hanatool.sh executable exists
if [[ ! -x "$HANATOOL_PATH" ]]; then
    echo "❌ Error: hanatool.sh not found or not executable at '${HANATOOL_PATH}'"
    exit 1
fi

# --- Helpers ---
# The schema-export and tenant-backup logic was previously duplicated verbatim
# between the "schema"/"tenant" cases and the "all" case; it is factored into
# functions so both paths share one implementation.

# Export every schema listed in $SCHEMA_NAMES to ${BACKUP_BASE_DIR}/schema.
# Uses USER_KEY, COMMON_OPTIONS, THREADS, and COMPRESS_SCHEMA from the config.
run_schema_exports() {
    if [[ -z "$SCHEMA_NAMES" ]]; then
        echo "  ⚠️ Warning: SCHEMA_NAMES variable is not set in config. Skipping schema export."
        return
    fi
    echo "🔎 Found schemas to export: ${SCHEMA_NAMES}"
    local schema opts
    for schema in $SCHEMA_NAMES; do
        echo "⬇️ Starting schema export for '${schema}'..."
        opts="$COMMON_OPTIONS"
        if [[ -n "$THREADS" ]]; then
            opts+=" -t $THREADS"
        fi
        if [[ "$COMPRESS_SCHEMA" == "true" ]]; then
            opts+=" --compress"
        fi
        # $opts is intentionally unquoted: it is a space-separated option list.
        if ! "$HANATOOL_PATH" "$USER_KEY" export "$schema" "${BACKUP_BASE_DIR}/schema" $opts; then
            echo "❌ Error: Schema export for '${schema}' failed."
        fi
        echo "--------------------------------------------------"
    done
}

# Back up the tenant database to ${BACKUP_BASE_DIR}/tenant.
# Uses USER_KEY, COMMON_OPTIONS, and COMPRESS_TENANT from the config.
run_tenant_backup() {
    echo "⬇️ Starting Tenant backup..."
    local opts="$COMMON_OPTIONS"
    if [[ "$COMPRESS_TENANT" == "true" ]]; then
        opts+=" --compress"
    fi
    if ! "$HANATOOL_PATH" "$USER_KEY" backup "${BACKUP_BASE_DIR}/tenant" $opts; then
        echo "❌ Error: Tenant backup failed."
    fi
}

# --- Main Execution ---

echo "⚙️ Starting HANA backup process using hanatool.sh..."

mkdir -p "$BACKUP_BASE_DIR"

case "$BACKUP_TYPE" in
    schema)
        run_schema_exports
        ;;
    tenant)
        run_tenant_backup
        ;;
    all)
        run_schema_exports
        run_tenant_backup
        ;;
    *)
        echo "  ❌ Error: Invalid BACKUP_TYPE '${BACKUP_TYPE}' in config. Use 'schema', 'tenant', or 'all'."
        ;;
esac

# Check if SYSTEMDB backup is enabled, regardless of BACKUP_TYPE (as long as it's not 'schema' only)
if [[ "$BACKUP_TYPE" == "tenant" || "$BACKUP_TYPE" == "all" ]]; then
    if [[ "$BACKUP_SYSTEMDB" == "true" ]]; then
        echo "--------------------------------------------------"
        if [[ -z "$SYSTEMDB_USER_KEY" ]]; then
            echo "  ❌ Error: BACKUP_SYSTEMDB is true, but SYSTEMDB_USER_KEY is not set in config."
        else
            echo "⬇️ Starting SYSTEMDB backup..."
            SYSTEMDB_BACKUP_OPTIONS="$COMMON_OPTIONS"
            if [[ "$COMPRESS_TENANT" == "true" ]]; then # SYSTEMDB compression uses COMPRESS_TENANT setting
                SYSTEMDB_BACKUP_OPTIONS+=" --compress"
            fi
            if ! "$HANATOOL_PATH" "$SYSTEMDB_USER_KEY" backup "${BACKUP_BASE_DIR}/tenant" $SYSTEMDB_BACKUP_OPTIONS; then
                echo "❌ Error: SYSTEMDB backup failed."
            fi
        fi
    fi
fi

echo "📦 Backup process complete."
echo "👋 Exiting."
|
||||||
31
templates/cleaner.sh
Normal file
31
templates/cleaner.sh
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
#!/bin/bash
# Version: 1.1.0
# Author: Tomi Eckert
# Deletes files older than a per-directory retention period.
# Each argument has the form <retention_days>:<path>; invalid arguments are
# reported and skipped, and the remaining arguments are still processed.

# Check if any arguments were provided
if [ "$#" -eq 0 ]; then
    echo "Usage: $0 <retention_days>:<path> [<retention_days>:<path> ...]"
    exit 1
fi

# Loop through each argument provided
for ARG in "$@"; do
    # Split the argument at the first colon (paths containing further colons
    # stay intact because the second variable receives the remainder).
    IFS=':' read -r RETENTION_DAYS TARGET_DIR <<< "$ARG"

    # Validate that both a retention period and a path were provided
    if [ -z "$RETENTION_DAYS" ] || [ -z "$TARGET_DIR" ]; then
        echo "Invalid format for argument: $ARG. Please use the format <retention_days>:<path>"
        continue
    fi

    # Retention must be a non-negative integer — otherwise find's -mtime
    # would receive garbage (e.g. from a swapped "path:days" argument).
    if ! [[ "$RETENTION_DAYS" =~ ^[0-9]+$ ]]; then
        echo "Invalid retention days '$RETENTION_DAYS' in argument: $ARG. Must be a non-negative integer."
        continue
    fi

    echo "Starting cleanup of files older than $RETENTION_DAYS days in $TARGET_DIR..."

    # Use find to locate and delete files, handling potential errors
    find "$TARGET_DIR" -type f -mtime +"$RETENTION_DAYS" -delete -print || echo "Could not process $TARGET_DIR. Check permissions."

    echo "Cleanup complete for $TARGET_DIR."
    echo "--------------------------------------------------"
done

echo "All cleanup tasks finished."
|
||||||
354
templates/firewalld.sh
Normal file
354
templates/firewalld.sh
Normal file
@@ -0,0 +1,354 @@
|
|||||||
|
#!/bin/bash

# ==========================================
# INTERACTIVE FIREWALL CONFIGURATOR FOR SAP B1
# (With Save/Load State)
# ==========================================

# Configuration File
# NOTE(review): relative path — the state file is created in whatever
# directory the script is run from, not next to the script. Confirm intended.
CONFIG_FILE="./firewall_state.conf"

# Colors for formatting
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color

# ==========================================
# SERVICE DEFINITIONS
# ==========================================
# Parallel arrays: SVC_NAMES[i] is the display name, SVC_PORTS[i] is a
# space-separated list of TCP ports for that service.
declare -a SVC_NAMES
declare -a SVC_PORTS

SVC_NAMES[0]="SAP Web Client"
SVC_PORTS[0]="443"

SVC_NAMES[1]="SAP HANA Database (System & Company DB)"
SVC_PORTS[1]="30013 30015"

SVC_NAMES[2]="SAP Business One SLD"
SVC_PORTS[2]="40000"

SVC_NAMES[3]="SAP Business One Auth"
SVC_PORTS[3]="40020"

SVC_NAMES[4]="SAP Business One Service Layer, Cockpit"
SVC_PORTS[4]="50000 4300"

SVC_NAMES[5]="SAP Host Agent"
SVC_PORTS[5]="1128 1129"

SVC_NAMES[6]="SSH Remote Access"
SVC_PORTS[6]="22"

SVC_NAMES[7]="SMB / B1_SHR (File Sharing)"
SVC_PORTS[7]="139 445"

# Arrays to store user decisions (indexed in parallel with SVC_NAMES)
declare -a CONFIG_DECISION # "ALL", "IP", "SKIP"
declare -a CONFIG_IPS      # Stores the IP string if "IP" is chosen
DO_FLUSH=false             # Set true if the user opts to flush runtime rules
|
||||||
|
|
||||||
|
# ==========================================
|
||||||
|
# HELPER FUNCTIONS
|
||||||
|
# ==========================================
|
||||||
|
|
||||||
|
# Clears the terminal and prints the script's banner.
print_header() {
    clear
    echo -e "${CYAN}==========================================${NC}"
    echo -e "${CYAN} SAP B1 Interactive Firewall Setup ${NC}"
    echo -e "${CYAN}==========================================${NC}"
    echo ""
}
|
||||||
|
|
||||||
|
# Persists the current per-service decisions (CONFIG_DECISION / CONFIG_IPS)
# to $CONFIG_FILE as bash assignments, so load_config can source them back.
# The file is rewritten from scratch on every call.
save_config() {
    echo "# SAP B1 Firewall State - Do not edit manually unless you know what you are doing" > "$CONFIG_FILE"
    for i in "${!SVC_NAMES[@]}"; do
        # Use simple variable assignment format
        echo "SAVED_DECISION[$i]=\"${CONFIG_DECISION[$i]}\"" >> "$CONFIG_FILE"
        echo "SAVED_IPS[$i]=\"${CONFIG_IPS[$i]}\"" >> "$CONFIG_FILE"
    done
    echo "Config state saved to $CONFIG_FILE"
}
|
||||||
|
|
||||||
|
# Loads a previously saved state file, if present, and copies the SAVED_*
# arrays it defines into CONFIG_DECISION / CONFIG_IPS.
# Returns 0 when a config file was found and loaded, 1 otherwise.
load_config() {
    if [ -f "$CONFIG_FILE" ]; then
        echo -e "${GREEN}Found saved configuration file.${NC}"
        source "$CONFIG_FILE"
        # Map SAVED variables to current CONFIG variables
        for i in "${!SVC_NAMES[@]}"; do
            CONFIG_DECISION[$i]="${SAVED_DECISION[$i]}"
            CONFIG_IPS[$i]="${SAVED_IPS[$i]}"
        done
        return 0
    else
        return 1
    fi
}
|
||||||
|
|
||||||
|
# ==========================================
# INITIAL SETUP
# ==========================================
print_header

# 1. Flush Question (First)
echo -e "${YELLOW}Existing Configuration:${NC}"
echo "Do you want to FLUSH (remove) all currently active firewall rules before starting?"
echo "This ensures a clean slate. (Only affects Runtime, not Permanent config)"
read -p "Flush all current rules? [Y/n]: " flush_choice
flush_choice=${flush_choice:-Y} # Default to Yes

if [[ "$flush_choice" =~ ^[Yy]$ ]]; then
    DO_FLUSH=true
    echo -e "-> ${RED}Will flush all rules (Clean Slate).${NC}"
else
    echo -e "-> Keeping existing rules (Appending new ones)."
fi
echo ""
sleep 1

# 2. Load Configuration (Second)
# When a state file exists, its decisions become the per-service defaults
# offered during the configuration loop below.
HAS_CONFIG=false
if load_config; then
    HAS_CONFIG=true
    echo "Previous settings loaded. You can press ENTER to accept defaults during selection."
else
    echo "No previous configuration found. Starting fresh."
fi
echo ""
sleep 1
|
||||||
|
|
||||||
|
# ==========================================
# CONFIGURATION LOOP
# ==========================================
# For each defined service, ask the user whether to open it publicly,
# restrict it to specific IPs, or skip it. Decisions are recorded in
# CONFIG_DECISION / CONFIG_IPS; nothing is applied to the firewall yet.

print_header
echo -e "This script will help you configure access rules for each service."
echo -e "${YELLOW}Note: Configuration applies to RUNTIME only.${NC}"
echo ""

for i in "${!SVC_NAMES[@]}"; do
    NAME="${SVC_NAMES[$i]}"
    PORTS="${SVC_PORTS[$i]}"

    # Get previous setting if available
    PREV_DECISION="${CONFIG_DECISION[$i]}"
    PREV_IPS="${CONFIG_IPS[$i]}"

    # Determine default option number for UI
    DEFAULT_OPT=""
    DEFAULT_TXT=""
    if [[ "$PREV_DECISION" == "ALL" ]]; then DEFAULT_OPT="1"; DEFAULT_TXT="[Default: 1 - Public]"; fi
    if [[ "$PREV_DECISION" == "IP" ]]; then DEFAULT_OPT="2"; DEFAULT_TXT="[Default: 2 - Restricted]"; fi
    if [[ "$PREV_DECISION" == "SKIP" ]]; then DEFAULT_OPT="3"; DEFAULT_TXT="[Default: 3 - Skip]"; fi

    echo -e "--------------------------------------------------"
    echo -e "Configuring Service: ${GREEN}$NAME${NC}"
    echo -e "Ports: ${YELLOW}$PORTS${NC}"
    echo "--------------------------------------------------"
    echo "1) Allow from ANYWHERE (Public)"
    echo "2) Restrict to SPECIFIC IPs"
    echo "3) Skip / Block (Do not open)"

    # Re-prompt until a valid option (1-3) is chosen.
    while true; do
        read -p "Select option (1-3) $DEFAULT_TXT: " choice

        # Handle Enter key (Default)
        if [[ -z "$choice" && -n "$DEFAULT_OPT" ]]; then
            choice=$DEFAULT_OPT
        fi

        case $choice in
            1)
                CONFIG_DECISION[$i]="ALL"
                CONFIG_IPS[$i]=""
                echo -e "-> Selected: ${RED}Public Access${NC}"
                break
                ;;
            2)
                CONFIG_DECISION[$i]="IP"
                echo ""
                echo -e "Enter IPs or Subnets separated by spaces or commas."

                # Show previous IPs as default if they exist
                if [[ -n "$PREV_IPS" ]]; then
                    echo -e "Current Saved IPs: ${CYAN}$PREV_IPS${NC}"
                    read -p "IPs [Press Enter to keep current]: " ip_input
                    if [[ -z "$ip_input" ]]; then
                        ip_input="$PREV_IPS"
                    fi
                else
                    echo -e "Example: ${CYAN}192.168.1.10, 192.168.1.20${NC}"
                    read -p "IPs: " ip_input
                fi

                # Replace commas with spaces to sanitize
                CONFIG_IPS[$i]="${ip_input//,/ }"
                echo -e "-> Selected: ${GREEN}Restricted to ${CONFIG_IPS[$i]}${NC}"
                break
                ;;
            3)
                CONFIG_DECISION[$i]="SKIP"
                CONFIG_IPS[$i]=""
                echo -e "-> Selected: ${YELLOW}Skipping${NC}"
                break
                ;;
            *)
                echo "Invalid option."
                ;;
        esac
    done
    echo ""
done
|
||||||
|
|
||||||
|
# ==========================================
# SUMMARY & CONFIRMATION
# ==========================================
# Render a table of all pending decisions, then require explicit confirmation
# before anything is saved or applied. Declining exits without changes.

print_header
echo -e "${YELLOW}SUMMARY OF PENDING CHANGES:${NC}"
echo ""
printf "%-25s | %-15s | %-30s\n" "Service" "Action" "Details"
echo "-------------------------------------------------------------------------------"

for i in "${!SVC_NAMES[@]}"; do
    NAME="${SVC_NAMES[$i]}"
    ACTION="${CONFIG_DECISION[$i]}"
    DETAILS="${CONFIG_IPS[$i]}"

    # Shorten name for table
    SHORT_NAME=${NAME:0:24}

    if [ "$ACTION" == "ALL" ]; then
        printf "%-25s | ${RED}%-15s${NC} | %-30s\n" "$SHORT_NAME" "Open Public" "0.0.0.0/0"
    elif [ "$ACTION" == "IP" ]; then
        printf "%-25s | ${GREEN}%-15s${NC} | %-30s\n" "$SHORT_NAME" "Restricted" "$DETAILS"
    else
        printf "%-25s | ${YELLOW}%-15s${NC} | %-30s\n" "$SHORT_NAME" "Blocked/Skip" "-"
    fi
done

echo ""
echo -e "${CYAN}Global Actions:${NC}"
if [ "$DO_FLUSH" = true ]; then
    echo "1. FLUSH ALL current rules (Clean Slate)."
else
    echo "1. Remove specific insecure rules (0-65535) and standard SSH service."
fi
echo ""

read -p "Do you want to SAVE config and APPLY changes now? [Y/n]: " confirm
confirm=${confirm:-Y} # Default to Yes

if [[ ! "$confirm" =~ ^[Yy]$ ]]; then
    echo "Aborted."
    exit 0
fi
|
||||||
|
|
||||||
|
# ==========================================
# EXECUTION
# ==========================================
# Apply the decisions to the RUNTIME firewall only, then wait up to 15 s for
# the user to confirm connectivity. On timeout (user likely locked out) the
# runtime config is reverted from the permanent one; on confirmation the user
# may promote the runtime rules to permanent.
echo ""
echo "Saving configuration to $CONFIG_FILE..."
save_config

echo "Applying configurations (RUNTIME ONLY)..."

# 1. Flush or Safety Cleanup
if [ "$DO_FLUSH" = true ]; then
    echo "-> Flushing active rules..."
    # Flush Services
    for service in $(firewall-cmd --list-services); do
        firewall-cmd --remove-service="$service" >/dev/null 2>&1
    done
    # Flush Ports
    for port in $(firewall-cmd --list-ports); do
        firewall-cmd --remove-port="$port" >/dev/null 2>&1
    done
    # Flush Rich Rules
    firewall-cmd --list-rich-rules | while read -r rule; do
        if [ -n "$rule" ]; then
            firewall-cmd --remove-rich-rule="$rule" >/dev/null 2>&1
        fi
    done
else
    # Only remove specific conflicting rules if not flushing everything
    echo "-> Cleaning up insecure rules..."
    firewall-cmd --remove-port=0-65535/tcp >/dev/null 2>&1
    firewall-cmd --remove-service=ssh >/dev/null 2>&1
    firewall-cmd --remove-port=22/tcp >/dev/null 2>&1
fi

# 2. Loop and Apply
for i in "${!SVC_NAMES[@]}"; do
    PORTS="${SVC_PORTS[$i]}"
    DECISION="${CONFIG_DECISION[$i]}"

    # Convert space-separated ports to array for inner loop
    read -ra PORT_ARR <<< "$PORTS"

    if [ "$DECISION" == "ALL" ]; then
        # Open ports globally
        for port in "${PORT_ARR[@]}"; do
            echo " Opening $port globally..."
            firewall-cmd --add-port=${port}/tcp >/dev/null
        done

    elif [ "$DECISION" == "IP" ]; then
        # Add rich rules for specific IPs
        read -ra IP_ARR <<< "${CONFIG_IPS[$i]}"

        for ip in "${IP_ARR[@]}"; do
            if [[ -z "$ip" ]]; then continue; fi
            for port in "${PORT_ARR[@]}"; do
                echo " Allowing $ip access to port $port..."
                firewall-cmd --add-rich-rule="rule family='ipv4' source address='$ip' port port='$port' protocol='tcp' accept" >/dev/null
            done
        done
    fi
    # "SKIP" decisions intentionally apply nothing.
done

echo ""
echo -e "${YELLOW}==========================================${NC}"
echo -e "${YELLOW} TESTING CONNECTIVITY ${NC}"
echo -e "${YELLOW}==========================================${NC}"
echo "The new rules are active. If you are locked out, DO NOTHING."
echo "The firewall will automatically REVERT in 15 seconds."
echo ""
echo -e "${GREEN}If you can read this and your connection works:${NC}"
echo -e "Press ${CYAN}ENTER${NC} now to CONFIRM and KEEP the changes."
echo "OR press Ctrl+C to keep changes without saving (script exits)."
echo ""

# read -t 15 returns non-zero on timeout, which is the lockout failsafe path.
if read -t 15 -p "Waiting for confirmation... "; then
    # User pressed Enter (Connection is good)
    echo ""
    echo -e "${GREEN}Connection confirmed!${NC}"
    echo ""
    read -p "Do you want to save these rules PERMANENTLY now? [Y/n]: " save_choice
    save_choice=${save_choice:-Y} # Default to Yes

    if [[ "$save_choice" =~ ^[Yy]$ ]]; then
        echo "Saving to permanent configuration..."
        firewall-cmd --runtime-to-permanent
        echo -e "${GREEN}Configuration Saved.${NC}"
    else
        echo "Rules kept in RUNTIME only. They will be lost on reboot."
    fi
else
    # Timeout occurred (User likely locked out)
    echo ""
    echo -e "${RED}Timeout reached! Reverting changes...${NC}"

    # Revert Logic
    echo "1. Reloading permanent configuration..."
    firewall-cmd --reload >/dev/null 2>&1

    echo "2. Ensuring SSH is accessible (Failsafe)..."
    firewall-cmd --add-service=ssh >/dev/null 2>&1

    echo -e "${YELLOW}Firewall reverted to previous state (plus global SSH allow).${NC}"
fi

exit 0
|
||||||
471
templates/hanatool.sh
Normal file
471
templates/hanatool.sh
Normal file
@@ -0,0 +1,471 @@
|
|||||||
|
#!/bin/bash
# Version: 1.5.8
# Author: Tomi Eckert
# ==============================================================================
# SAP HANA Schema and Tenant Management Tool (hanatool.sh)
#
# A command-line utility to quickly export/import schemas or backup a tenant.
# ==============================================================================

# --- Default Settings ---
# Candidate install locations of the SAP HANA client.
HDB_CLIENT_PATH_1="/usr/sap/hdbclient"
HDB_CLIENT_PATH_2="/usr/sap/NDB/HDB00/exe"

# Use the first candidate directory that exists; abort when neither does.
if [ -d "$HDB_CLIENT_PATH_1" ]; then
    HDB_CLIENT_PATH="$HDB_CLIENT_PATH_1"
elif [ -d "$HDB_CLIENT_PATH_2" ]; then
    HDB_CLIENT_PATH="$HDB_CLIENT_PATH_2"
else
    echo "❌ Error: Neither '$HDB_CLIENT_PATH_1' nor '$HDB_CLIENT_PATH_2' found."
    echo "Please install the SAP HANA client or adjust the paths in the script."
    exit 1
fi

HDBSQL_PATH="${HDB_CLIENT_PATH}/hdbsql"
COMPRESS=false
THREADS=0 # 0 means auto-calculate later
DRY_RUN=false
NTFY_TOKEN=""
IMPORT_REPLACE=false

# Prefer pigz (parallel gzip) for tar compression when it is installed.
if command -v pigz &>/dev/null; then
    USE_PIGZ=true
else
    USE_PIGZ=false
fi
|
||||||
|
|
||||||
|
# --- Help/Usage Function ---
# Prints the full CLI reference (actions, arguments, options, examples).
usage() {
    cat <<EOF
SAP HANA Schema and Tenant Management Tool

Usage (Schema): $0 [USER_KEY] export|import [SCHEMA_NAME] [PATH] [OPTIONS]
      (Schema): $0 [USER_KEY] import-rename [SCHEMA_NAME] [NEW_SCHEMA_NAME] [PATH] [OPTIONS]
      (Tenant): $0 [USER_KEY] backup [PATH] [OPTIONS]

Actions:
  export            Export a schema to a specified path.
  import            Import a schema from a specified path.
  import-rename     Import a schema from a path to a new schema name.
  backup            Perform a full backup of the tenant.

Arguments:
  USER_KEY          The user key from hdbuserstore for DB connection.
  SCHEMA_NAME       The name of the source schema.
  NEW_SCHEMA_NAME   (Required for import-rename only) The target schema name.
  PATH              The file system path for the export/import/backup data.

Options:
  -t, --threads N   Specify the number of threads (not used for 'backup').
  -c, --compress    Enable tar.gz compression for exports and backups.
  -n, --dry-run     Show what commands would be executed without running them.
  --ntfy <token>    Send a notification via ntfy.sh upon completion/failure.
  --replace         Use the 'REPLACE' option for imports instead of 'IGNORE EXISTING'.
  --hdbsql <path>   Specify a custom path for the hdbsql executable.
  -h, --help        Show this help message.

Examples:
  # Backup the tenant determined by MY_TENANT_KEY and compress the result
  $0 MY_TENANT_KEY backup /hana/backups -c --ntfy tk_xxxxxxxxxxxx

  # Import MYSCHEMA from a compressed archive
  $0 MY_SCHEMA_KEY import MYSCHEMA /hana/backups/MYSCHEMA_20240101.tar.gz -c

  # Import MYSCHEMA as MYSCHEMA_TEST, replacing any existing objects
  $0 MY_SCHEMA_KEY import-rename MYSCHEMA MYSCHEMA_TEST /hana/backups/temp_export --replace
EOF
}
|
||||||
|
|
||||||
|
# --- Notification Function ---
# Pushes $1 to the configured ntfy.sh topic when NTFY_TOKEN is set.
# In dry-run mode it only prints the command it would have run; with no
# token configured it is a silent no-op.
send_notification() {
    local message="$1"

    # Nothing to do unless a token was supplied via --ntfy.
    [[ -n "$NTFY_TOKEN" ]] || return 0

    if [[ "$DRY_RUN" == "false" ]]; then
        echo "ℹ️ Sending notification..."
        curl -s -H "Authorization: Bearer $NTFY_TOKEN" -d "$message" https://ntfy.technopunk.space/sap > /dev/null
    else
        echo "[DRY RUN] Would send notification: curl -H \"Authorization: Bearer ...\" -d \"$message\" https://ntfy.technopunk.space/sap"
    fi
}
|
||||||
|
|
||||||
|
# --- Function to get HANA tenant name ---
# Resolves the tenant (database) name behind an hdbuserstore key.
#   $1: hdbuserstore user key
#   $2: path to the hdbsql executable
#   $3: "true" for dry-run mode
# Prints ONLY the tenant name on stdout. Callers capture the result with
# command substitution (TENANT_NAME=$(get_hana_tenant_name ...)), so all
# diagnostics MUST go to stderr — the original sent them to stdout, which
# corrupted the captured tenant name in both the dry-run and error paths.
get_hana_tenant_name() {
    local user_key="$1"
    local hdbsql_path="$2"
    local dry_run="$3"

    local query="SELECT DATABASE_NAME FROM SYS.M_DATABASES;"
    local tenant_name=""

    if [[ "$dry_run" == "true" ]]; then
        # Diagnostic on stderr so stdout stays clean for $(...) capture.
        echo "[DRY RUN] Would execute hdbsql to get tenant name: \"$hdbsql_path\" -U \"$user_key\" \"$query\"" >&2
        tenant_name="DRYRUN_TENANT"
    else
        # Strip the header row, keep the first value, drop whitespace/quotes.
        tenant_name=$("$hdbsql_path" -U "$user_key" "$query" | tail -n +2 | head -n 1 | tr -d '[:space:]' | tr -d '"')
        if [[ -z "$tenant_name" ]]; then
            # Error on stderr; when called via $(...) this exit only ends the
            # subshell, but the empty stdout signals failure to the caller.
            echo "❌ Error: Could not retrieve HANA tenant name using user key '${user_key}'." >&2
            exit 1
        fi
    fi
    echo "$tenant_name"
}
|
||||||
|
|
||||||
|
# --- Argument Parsing ---
# Splits flags from positional arguments. Options taking a value now verify
# the value exists: the original did `shift 2` unconditionally, and bash's
# `shift 2` with only one parameter left shifts NOTHING (returns non-zero),
# so e.g. `hanatool.sh -t` looped forever on the same argument.
POSITIONAL_ARGS=()
while [[ $# -gt 0 ]]; do
    case $1 in
        -t|--threads)
            [[ $# -ge 2 ]] || { echo "❌ Error: Option '$1' requires a value."; exit 1; }
            THREADS="$2"
            shift 2
            ;;
        -c|--compress)
            COMPRESS=true
            shift
            ;;
        -n|--dry-run)
            DRY_RUN=true
            shift
            ;;
        --ntfy)
            [[ $# -ge 2 ]] || { echo "❌ Error: Option '$1' requires a value."; exit 1; }
            NTFY_TOKEN="$2"
            shift 2
            ;;
        --replace)
            IMPORT_REPLACE=true
            shift
            ;;
        --hdbsql)
            [[ $# -ge 2 ]] || { echo "❌ Error: Option '$1' requires a value."; exit 1; }
            HDBSQL_PATH="$2"
            shift 2
            ;;
        -h|--help)
            usage
            exit 0
            ;;
        *)
            POSITIONAL_ARGS+=("$1") # save positional arg
            shift
            ;;
    esac
done
set -- "${POSITIONAL_ARGS[@]}" # restore positional parameters

# Assign common positional arguments
USER_KEY="$1"
ACTION="$2"
|
||||||
|
|
||||||
|
# --- Main Logic ---

# Announce dry-run mode up front so every later "[DRY RUN]" line has context.
if [[ "$DRY_RUN" == "true" ]]; then
    echo "⚠️ --- DRY RUN MODE ENABLED --- ⚠️"
    echo "No actual commands will be executed."
    echo "-------------------------------------"
fi

# Check for hdbsql executable
if [[ ! -x "$HDBSQL_PATH" ]]; then
    echo "❌ Error: hdbsql not found or not executable at '${HDBSQL_PATH}'"
    exit 1
fi

# For schema actions, default THREADS to half the machine's CPUs (min 1).
# 'backup' does not use threads, so it is excluded.
if [[ "$THREADS" -eq 0 && "$ACTION" != "backup" ]]; then
    TOTAL_THREADS=$(nproc --all)
    THREADS=$((TOTAL_THREADS / 2))
    [[ "$THREADS" -gt 0 ]] || THREADS=1
    echo "ℹ️ Auto-detected threads to use: ${THREADS}"
fi
|
||||||
|
|
||||||
|
# Execute action based on user input
case "$ACTION" in
    backup)
        # --- Full tenant backup (optionally tar.gz-compressed) ---
        TARGET_PATH="$3"
        if [[ -z "$USER_KEY" || -z "$TARGET_PATH" ]]; then
            echo "❌ Error: Missing arguments for 'backup' action."
            usage
            exit 1
        fi

        echo "⬇️ Starting tenant backup..."
        echo " - User Key: ${USER_KEY}"
        echo " - Path: ${TARGET_PATH}"
        echo " - Compress: ${COMPRESS}"

        TENANT_NAME=$(get_hana_tenant_name "$USER_KEY" "$HDBSQL_PATH" "$DRY_RUN")
        echo " - Tenant Name: ${TENANT_NAME}"

        timestamp=$(date +%Y%m%d_%H%M%S)
        backup_target_dir="$TARGET_PATH" # Initialize with TARGET_PATH
        backup_path_prefix=""

        # Compressed backups land in a temp dir first, then get archived.
        if [[ "$COMPRESS" == "true" ]]; then
            if [[ "$DRY_RUN" == "true" ]]; then
                backup_target_dir="${TARGET_PATH}/${TENANT_NAME}_backup_DRYRUN_TEMP"
            else
                backup_target_dir=$(mktemp -d "${TARGET_PATH}/${TENANT_NAME}_backup_${timestamp}_XXXXXXXX")
            fi
            echo "ℹ️ Using temporary backup directory: ${backup_target_dir}"
        fi

        if [[ "$DRY_RUN" == "true" ]]; then
            echo "[DRY RUN] Would create directory: mkdir -p \"$backup_target_dir\""
        else
            mkdir -p "$backup_target_dir"
        fi

        backup_path_prefix="${backup_target_dir}/backup_${TENANT_NAME}_${timestamp}"

        QUERY="BACKUP DATA USING FILE ('${backup_path_prefix}')"

        EXIT_CODE=0
        if [[ "$DRY_RUN" == "true" ]]; then
            echo "[DRY RUN] Would execute hdbsql: \"$HDBSQL_PATH\" -U \"$USER_KEY\" \"$QUERY\""
        else
            "$HDBSQL_PATH" -U "$USER_KEY" "$QUERY" > /dev/null 2>&1
            EXIT_CODE=$?
        fi

        if [[ "$EXIT_CODE" -eq 0 ]]; then
            echo "✅ Successfully initiated tenant backup with prefix '${backup_path_prefix}'."
            if [[ "$COMPRESS" == "true" ]]; then
                ARCHIVE_FILE="${TARGET_PATH}/${TENANT_NAME}_backup_${timestamp}.tar.gz"
                echo "🗜️ Compressing backup files to '${ARCHIVE_FILE}'..."

                TAR_EXIT_CODE=0
                if [[ "$DRY_RUN" == "true" ]]; then
                    if [[ "$USE_PIGZ" == "true" ]]; then
                        echo "[DRY RUN] Would execute tar (pigz): tar -I \"pigz -p $THREADS\" -cf \"$ARCHIVE_FILE\" -C \"$backup_target_dir\" ."
                    else
                        echo "[DRY RUN] Would execute tar: tar -czf \"$ARCHIVE_FILE\" -C \"$backup_target_dir\" ."
                    fi
                else
                    # pigz parallelizes the gzip stage when available.
                    if [[ "$USE_PIGZ" == "true" ]]; then
                        tar -I "pigz -p $THREADS" -cf "$ARCHIVE_FILE" -C "$backup_target_dir" .
                    else
                        tar -czf "$ARCHIVE_FILE" -C "$backup_target_dir" .
                    fi
                    TAR_EXIT_CODE=$?
                fi

                if [[ "$TAR_EXIT_CODE" -eq 0 ]]; then
                    echo "✅ Successfully created archive."
                    echo "🧹 Cleaning up temporary directory..."
                    if [[ "$DRY_RUN" == "true" ]]; then
                        echo "[DRY RUN] Would remove temp directory: rm -rf \"$backup_target_dir\""
                    else
                        rm -rf "$backup_target_dir"
                    fi
                else
                    echo "❌ Error: Failed to create archive from '${backup_target_dir}'."
                fi
            fi
            send_notification "✅ HANA tenant '${TENANT_NAME}' backup completed successfully."
        else
            echo "❌ Error: Failed to initiate tenant backup (hdbsql exit code: ${EXIT_CODE})."
            send_notification "❌ HANA tenant '${TENANT_NAME}' backup FAILED."
            # Drop the now-useless temp dir (only created in compress mode).
            if [[ "$COMPRESS" == "true" && "$DRY_RUN" == "false" ]]; then rm -rf "$backup_target_dir"; fi
        fi
        ;;

    export)
        # --- Schema export (optionally tar.gz-compressed) ---
        SCHEMA_NAME="$3"
        TARGET_PATH="$4"
        if [[ -z "$USER_KEY" || -z "$SCHEMA_NAME" || -z "$TARGET_PATH" ]]; then
            echo "❌ Error: Missing arguments for 'export' action."
            usage
            exit 1
        fi

        echo "⬇️ Starting schema export..."
        echo " - User Key: ${USER_KEY}"
        echo " - Schema: ${SCHEMA_NAME}"
        echo " - Path: ${TARGET_PATH}"
        echo " - Compress: ${COMPRESS}"
        echo " - Threads: ${THREADS}"

        EXPORT_DIR="$TARGET_PATH"
        if [[ "$COMPRESS" == "true" ]]; then
            if [[ "$DRY_RUN" == "true" ]]; then
                EXPORT_DIR="${TARGET_PATH}/export_${SCHEMA_NAME}_DRYRUN_TEMP"
            else
                EXPORT_DIR=$(mktemp -d "${TARGET_PATH}/export_${SCHEMA_NAME}_XXXXXXXX")
            fi
            echo "ℹ️ Using temporary export directory: ${EXPORT_DIR}"
        fi

        if [[ "$DRY_RUN" == "true" ]]; then
            echo "[DRY RUN] Would create directory: mkdir -p \"$EXPORT_DIR\""
        else
            mkdir -p "$EXPORT_DIR"
        fi

        QUERY="EXPORT \"${SCHEMA_NAME}\".\"*\" AS BINARY INTO '${EXPORT_DIR}' WITH REPLACE THREADS ${THREADS} NO DEPENDENCIES;"

        EXIT_CODE=0
        if [[ "$DRY_RUN" == "true" ]]; then
            echo "[DRY RUN] Would execute hdbsql: \"$HDBSQL_PATH\" -U \"$USER_KEY\" \"$QUERY\""
        else
            "$HDBSQL_PATH" -U "$USER_KEY" "$QUERY" > /dev/null 2>&1
            EXIT_CODE=$?
        fi

        if [[ "$EXIT_CODE" -eq 0 ]]; then
            echo "✅ Successfully exported schema '${SCHEMA_NAME}' to '${EXPORT_DIR}'."
            if [[ "$COMPRESS" == "true" ]]; then
                ARCHIVE_FILE="${TARGET_PATH}/${SCHEMA_NAME}_$(date +%Y%m%d_%H%M%S).tar.gz"
                echo "🗜️ Compressing files to '${ARCHIVE_FILE}'..."

                TAR_EXIT_CODE=0
                if [[ "$DRY_RUN" == "true" ]]; then
                    if [[ "$USE_PIGZ" == "true" ]]; then
                        echo "[DRY RUN] Would execute tar (pigz): tar -I \"pigz -p $THREADS\" -cf \"$ARCHIVE_FILE\" -C \"$(dirname "$EXPORT_DIR")\" \"$(basename "$EXPORT_DIR")\""
                    else
                        echo "[DRY RUN] Would execute tar: tar -czf \"$ARCHIVE_FILE\" -C \"$(dirname "$EXPORT_DIR")\" \"$(basename "$EXPORT_DIR")\""
                    fi
                else
                    # Archive the export dir as a single top-level entry so the
                    # import side can strip it with --strip-components=1.
                    if [[ "$USE_PIGZ" == "true" ]]; then
                        tar -I "pigz -p $THREADS" -cf "$ARCHIVE_FILE" -C "$(dirname "$EXPORT_DIR")" "$(basename "$EXPORT_DIR")"
                    else
                        tar -czf "$ARCHIVE_FILE" -C "$(dirname "$EXPORT_DIR")" "$(basename "$EXPORT_DIR")"
                    fi
                    TAR_EXIT_CODE=$?
                fi

                if [[ "$TAR_EXIT_CODE" -eq 0 ]]; then
                    echo "✅ Successfully created archive."
                    echo "🧹 Cleaning up temporary directory..."
                    if [[ "$DRY_RUN" == "true" ]]; then
                        echo "[DRY RUN] Would remove temp directory: rm -rf \"$EXPORT_DIR\""
                    else
                        rm -rf "$EXPORT_DIR"
                    fi
                else
                    echo "❌ Error: Failed to create archive from '${EXPORT_DIR}'."
                fi
            fi
            send_notification "✅ Export of schema '${SCHEMA_NAME}' completed successfully."
        else
            echo "❌ Error: Failed to export schema '${SCHEMA_NAME}' (hdbsql exit code: ${EXIT_CODE})."
            send_notification "❌ Export of schema '${SCHEMA_NAME}' FAILED."
            if [[ "$COMPRESS" == "true" && "$DRY_RUN" == "false" ]]; then rm -rf "$EXPORT_DIR"; fi
        fi
        ;;

    import|import-rename)
        # --- Schema import, optionally renaming the target schema ---
        SCHEMA_NAME="$3"
        if [[ "$ACTION" == "import" ]]; then
            SOURCE_PATH="$4"
            NEW_SCHEMA_NAME=""
            if [[ -z "$USER_KEY" || -z "$SCHEMA_NAME" || -z "$SOURCE_PATH" ]]; then
                echo "❌ Error: Missing arguments for 'import' action."
                usage
                exit 1
            fi
        else # import-rename
            NEW_SCHEMA_NAME="$4"
            SOURCE_PATH="$5"
            if [[ -z "$USER_KEY" || -z "$SCHEMA_NAME" || -z "$NEW_SCHEMA_NAME" || -z "$SOURCE_PATH" ]]; then
                echo "❌ Error: Missing arguments for 'import-rename' action."
                usage
                exit 1
            fi
        fi

        echo "⬆️ Starting schema import..."
        echo " - User Key: ${USER_KEY}"
        echo " - Source Schema: ${SCHEMA_NAME}"
        if [[ -n "$NEW_SCHEMA_NAME" ]]; then
            echo " - Target Schema: ${NEW_SCHEMA_NAME}"
        fi
        echo " - Path: ${SOURCE_PATH}"
        echo " - Compress: ${COMPRESS}"
        echo " - Threads: ${THREADS}"

        IMPORT_DIR="$SOURCE_PATH"
        if [[ "$COMPRESS" == "true" ]]; then
            # In compress mode SOURCE_PATH must be an archive file to unpack.
            if [[ ! -f "$SOURCE_PATH" && "$DRY_RUN" == "false" ]]; then
                echo "❌ Error: Source path '${SOURCE_PATH}' is not a valid file for compressed import."
                exit 1
            fi

            if [[ "$DRY_RUN" == "true" ]]; then
                IMPORT_DIR="/tmp/import_${SCHEMA_NAME}_DRYRUN_TEMP"
            else
                IMPORT_DIR=$(mktemp -d "/tmp/import_${SCHEMA_NAME}_XXXXXXXX")
            fi

            echo "ℹ️ Decompressing to temporary directory: ${IMPORT_DIR}"

            TAR_EXIT_CODE=0
            if [[ "$DRY_RUN" == "true" ]]; then
                echo "[DRY RUN] Would decompress archive: tar -xzf \"$SOURCE_PATH\" -C \"$IMPORT_DIR\" --strip-components=1"
            else
                # --strip-components=1 removes the export's wrapper directory.
                tar -xzf "$SOURCE_PATH" -C "$IMPORT_DIR" --strip-components=1
                TAR_EXIT_CODE=$?
            fi

            if [[ "$TAR_EXIT_CODE" -ne 0 ]]; then
                echo "❌ Error: Failed to decompress '${SOURCE_PATH}'."
                if [[ "$DRY_RUN" == "false" ]]; then rm -rf "$IMPORT_DIR"; fi
                exit 1
            fi
        fi

        if [[ ! -d "$IMPORT_DIR" && "$DRY_RUN" == "false" ]]; then
            echo "❌ Error: Import directory '${IMPORT_DIR}' does not exist."
            exit 1
        fi

        # Build the WITH-clause: conflict handling plus an optional rename.
        import_options=""
        if [[ "$IMPORT_REPLACE" == "true" ]]; then
            import_options="REPLACE"
            echo " - Mode: REPLACE"
        else
            import_options="IGNORE EXISTING"
            echo " - Mode: IGNORE EXISTING (default)"
        fi

        if [[ "$ACTION" == "import-rename" ]]; then
            import_options="${import_options} RENAME SCHEMA \"${SCHEMA_NAME}\" TO \"${NEW_SCHEMA_NAME}\""
        fi

        QUERY="IMPORT \"${SCHEMA_NAME}\".\"*\" AS BINARY FROM '${IMPORT_DIR}' WITH ${import_options} THREADS ${THREADS};"

        EXIT_CODE=0
        if [[ "$DRY_RUN" == "true" ]]; then
            echo "[DRY RUN] Would execute hdbsql: \"$HDBSQL_PATH\" -U \"$USER_KEY\" \"$QUERY\""
        else
            "$HDBSQL_PATH" -U "$USER_KEY" "$QUERY" > /dev/null 2>&1
            EXIT_CODE=$?
        fi

        # For plain import the target equals the source schema.
        target_schema_name="${NEW_SCHEMA_NAME:-$SCHEMA_NAME}"
        if [[ "$EXIT_CODE" -eq 0 ]]; then
            echo "✅ Successfully imported schema."
            send_notification "✅ ${ACTION} of schema '${SCHEMA_NAME}' to '${target_schema_name}' completed successfully."
        else
            echo "❌ Error: Failed to import schema (hdbsql exit code: ${EXIT_CODE})."
            send_notification "❌ ${ACTION} of schema '${SCHEMA_NAME}' to '${target_schema_name}' FAILED."
        fi

        # Temp dir only exists in compress mode; clean it up either way.
        if [[ "$COMPRESS" == "true" ]]; then
            echo "🧹 Cleaning up temporary directory..."
            if [[ "$DRY_RUN" == "true" ]]; then
                echo "[DRY RUN] Would remove temp directory: rm -rf \"$IMPORT_DIR\""
            else
                rm -rf "$IMPORT_DIR"
            fi
        fi
        ;;

    *)
        echo "❌ Error: Invalid action '${ACTION}'."
        usage
        exit 1
        ;;
esac

echo "✅ Process complete."
|
||||||
|
|
||||||
241
templates/install.sh
Normal file
241
templates/install.sh
Normal file
@@ -0,0 +1,241 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Author: Tomi Eckert
|
||||||
|
# --- Main Script ---
|
||||||
|
|
||||||
|
# This script presents a menu of software packages, or installs them
|
||||||
|
# non-interactively via command-line arguments. It downloads files from a
|
||||||
|
# remote configuration, shows a diff for config updates, and checks versions.
|
||||||
|
|
||||||
|
# --- Functions ---
|
||||||
|
|
||||||
|
# Get the version from a local script file.
# Scans the first 5 lines for a "# Version: X.Y.Z" marker and prints the
# version; prints "0.0.0" when the file does not exist (treated as "not
# installed" by the version comparison).
get_local_version() {
    local file_path="$1"
    if [[ ! -f "${file_path}" ]]; then
        echo "0.0.0" # Return a base version if file doesn't exist.
        return
    fi
    head -n 5 "${file_path}" | grep -m 1 "^# Version:" | awk '{print $NF}'
}
|
||||||
|
|
||||||
|
# Compare two version strings. Returns 0 if v1 is newer.
# Uses `sort -V` (version sort): the smallest version sorts first, so v1 is
# strictly greater exactly when it is NOT the first line of the sorted pair.
is_version_greater() {
    local v1=$1
    local v2=$2
    [[ "$(printf '%s\n' "$v1" "$v2" | sort -V | head -n 1)" != "$v1" ]]
}
|
||||||
|
|
||||||
|
# Process a single selected package.
# Downloads each file listed for the package, diffing (or force-overwriting)
# existing .conf files, then runs the package's optional install script.
#   $1: package key in SCRIPT_PACKAGES
#   $2: "true" to overwrite existing config files without prompting
# NOTE: the mangled original referenced $(unknown) where the downloaded file
# name belongs; restored to ${filename} (defined from basename of each URL).
process_package() {
    local choice_key="$1"
    local force_overwrite="$2" # Expects "true" or "false"

    if [[ -z "${SCRIPT_PACKAGES[$choice_key]}" ]]; then
        echo "[❌] Invalid package name provided: '${choice_key}'"
        return
    fi

    echo
    echo "[⬇️] Processing package: '${choice_key}'..."

    # Config format: display_name|version|description|urls|install_script
    config_value="${SCRIPT_PACKAGES[$choice_key]}"
    display_name=$(echo "${config_value}" | cut -d'|' -f1)
    remote_version=$(echo "${config_value}" | cut -d'|' -f2)
    description=$(echo "${config_value}" | cut -d'|' -f3)
    urls_to_download=$(echo "${config_value}" | cut -d'|' -f4)
    install_script=$(echo "${config_value}" | cut -d'|' -f5) # Optional install script

    read -r -a urls_to_download_array <<< "$urls_to_download"

    for url in "${urls_to_download_array[@]}"; do
        filename=$(basename "${url}")
        # Existing .conf files are never blindly replaced: show a diff and ask,
        # unless --overwrite-config was passed.
        if [[ "${filename}" == *.conf && -f "${filename}" ]]; then
            if [[ "$force_overwrite" == "true" ]]; then
                echo "[⚠️] Overwriting '${filename}' due to --overwrite-config flag."
                if ! curl -fsSL -o "${filename}" "${url}"; then
                    echo "[❌] Error: Failed to download '${filename}'."
                fi
                continue
            fi

            echo "[->] Found existing config file: '${filename}'."
            tmp_file=$(mktemp)
            if curl -fsSL -o "${tmp_file}" "${url}"; then
                echo "[🔎] Comparing versions..."
                echo "-------------------- DIFF START --------------------"
                if command -v colordiff &> /dev/null; then
                    colordiff -u "${filename}" "${tmp_file}"
                else
                    # --color=always is GNU-only; fall back to plain diff.
                    diff --color=always -u "${filename}" "${tmp_file}" 2>/dev/null || diff -u "${filename}" "${tmp_file}"
                fi
                echo "--------------------- DIFF END ---------------------"
                read -p "Do you want to overwrite '${filename}'? (y/N) " -n 1 -r REPLY
                echo
                if [[ $REPLY =~ ^[Yy]$ ]]; then
                    mv "${tmp_file}" "${filename}"
                    echo "[✅] Updated '${filename}'."
                else
                    rm "${tmp_file}"
                    echo "[🤷] Kept existing version of '${filename}'."
                fi
            else
                echo "[❌] Error downloading new version of '${filename}' for comparison."
                rm -f "${tmp_file}"
            fi
        else
            # Original download logic for all other files.
            echo "[->] Downloading '${filename}'..."
            if curl -fsSL -o "${filename}" "${url}"; then
                echo "[✅] Successfully downloaded '${filename}'."
                if [[ "${filename}" == *.sh || "${filename}" == *.bash ]]; then
                    chmod +x "${filename}"
                    echo "[🤖] Made '${filename}' executable."
                fi
            else
                echo "[❌] Error: Failed to download '${filename}'."
            fi
        fi
    done

    if [[ -n "${install_script}" ]]; then
        echo "[⚙️] Running install script for '${choice_key}'..."
        # SECURITY NOTE: this pipes a remote script straight into bash; only
        # trusted URLs from packages.conf should appear here.
        bash -c "$(curl -sSL "${install_script}")"
        local rc=$?
        if [ "$rc" -eq 0 ]; then
            echo "[✅] Install script completed successfully."
        else
            # BUG FIX: the original printed $? AFTER `[ $? -eq 0 ]` had already
            # consumed it, so the message always showed the test's own status.
            echo "[❌] Install script failed with exit code ${rc}."
        fi
    fi
    echo "[📦] Package processing complete for '${choice_key}'."
}
|
||||||
|
|
||||||
|
# --- Main Logic ---

# Fetch the package catalog to a unique temp name; remove it on any exit.
conf_file="packages.conf.$(date +%Y%m%d%H%M%S)"
trap 'rm -f "${conf_file}"' EXIT

echo "[🔄] Downloading configuration file..."
if ! curl -fsSL -o "${conf_file}" "https://git.technopunk.space/tomi/Scripts/raw/branch/main/packages.conf"; then
    echo "[❌] Error: Failed to download packages.conf. Exiting."
    exit 1
fi
echo "[✅] Configuration file downloaded successfully."

# Defines the SCRIPT_PACKAGES associative array used below.
source "${conf_file}"

# --- Argument Parsing for Non-Interactive Mode ---
# Any command-line argument switches the script to non-interactive mode.
if [ "$#" -gt 0 ]; then
    declare -a packages_to_install
    overwrite_configs=false
    for arg in "$@"; do
        case $arg in
            --overwrite-config)
                overwrite_configs=true
                ;;
            -*)
                echo "[❌] Unknown flag: $arg" >&2
                exit 1
                ;;
            *)
                packages_to_install+=("$arg")
                ;;
        esac
    done

    if [ ${#packages_to_install[@]} -eq 0 ]; then
        echo "[❌] Flag provided with no package names. Exiting."
        exit 1
    fi

    echo "[🚀] Running in non-interactive mode."
    for pkg_key in "${packages_to_install[@]}"; do
        if [[ -n "${SCRIPT_PACKAGES[$pkg_key]}" ]]; then
            process_package "$pkg_key" "$overwrite_configs"
        else
            echo "[⚠️] Unknown package: '$pkg_key'. Skipping."
        fi
    done
    echo "[🏁] Non-interactive run complete."
    exit 0
fi
|
||||||
|
|
||||||
|
# --- Interactive Mode ---
# Build a stable, sorted list of package keys so menu numbers are predictable.
declare -a ordered_keys
package_keys_sorted=($(for k in "${!SCRIPT_PACKAGES[@]}"; do echo $k; done | sort))
ordered_keys=("${package_keys_sorted[@]}")

# --- Display Menu ---
echo
echo "-------------------------------------"
echo "         Script Downloader           "
echo "-------------------------------------"
echo "[🔎] Checking for updates..."
echo

for i in "${!ordered_keys[@]}"; do
    key="${ordered_keys[$i]}"
    config_value="${SCRIPT_PACKAGES[$key]}"
    display_name=$(echo "${config_value}" | cut -d'|' -f1)
    remote_version=$(echo "${config_value}" | cut -d'|' -f2)
    description=$(echo "${config_value}" | cut -d'|' -f3)
    urls=$(echo "${config_value}" | cut -d'|' -f4)
    # The first URL is the package's main script; its local copy (if any)
    # carries the installed version marker.
    read -r -a url_array <<< "$urls"
    main_script_filename=$(basename "${url_array[0]}")
    local_version=$(get_local_version "${main_script_filename}")

    # Bold title line, then description, then install/update status.
    echo -e "\033[1m$((i+1))) $key - $display_name (v$remote_version)\033[0m"
    echo "   $description"
    if [[ -f "${main_script_filename}" ]]; then
        if is_version_greater "$remote_version" "$local_version"; then
            echo -e "   \033[33m[Update available: v${local_version} -> v${remote_version}]\033[0m"
        else
            echo -e "   \033[32m[Installed: v${local_version}]\033[0m"
        fi
    fi
    echo
done
quit_num=$((${#ordered_keys[@]} + 1))
echo -e "\033[1m${quit_num}) Quit\033[0m"
echo

# --- Handle User Input ---
read -p "Please enter your choice(s) (e.g., 1 3 4), or press Enter to quit: " -r -a user_choices

if [ ${#user_choices[@]} -eq 0 ]; then
    echo "[👋] No selection made. Exiting."
    exit 0
fi

for choice_num in "${user_choices[@]}"; do
    if ! [[ "$choice_num" =~ ^[0-9]+$ ]]; then
        echo "[⚠️] Skipping invalid input: '${choice_num}'. Not a number."
        continue
    fi
    if [ "$choice_num" -eq "$quit_num" ]; then
        echo "[👋] Quit selected. Exiting."
        exit 0
    fi
    index=$((choice_num - 1))
    if [[ -z "${ordered_keys[$index]}" ]]; then
        echo "[⚠️] Skipping invalid choice: '${choice_num}'. Out of range."
        continue
    fi
    choice_key="${ordered_keys[$index]}"
    process_package "$choice_key" "false" # Never force overwrite in interactive mode
done

echo
echo "[🏁] All selected packages have been processed."
|
||||||
|
|
||||||
208
templates/keymanager.sh
Normal file
208
templates/keymanager.sh
Normal file
@@ -0,0 +1,208 @@
|
|||||||
|
#!/bin/bash
# Version: 1.2.3
# Author: Tomi Eckert

# A script to interactively manage SAP HANA hdbuserstore keys, with testing.

# --- Style Definitions ---
COLOR_BLUE='\033[1;34m'
COLOR_GREEN='\033[1;32m'
COLOR_YELLOW='\033[1;33m'
COLOR_RED='\033[1;31m'
COLOR_NC='\033[0m' # No Color

# --- Configuration ---
# Adjust these paths if your HANA client is installed elsewhere.
HDB_CLIENT_PATH_1="/usr/sap/hdbclient"
HDB_CLIENT_PATH_2="/usr/sap/NDB/HDB00/exe"

# Use the first candidate directory that exists; abort when none is present.
HDB_CLIENT_PATH=""
for candidate in "$HDB_CLIENT_PATH_1" "$HDB_CLIENT_PATH_2"; do
    if [ -d "$candidate" ]; then
        HDB_CLIENT_PATH="$candidate"
        break
    fi
done

if [ -z "$HDB_CLIENT_PATH" ]; then
    echo -e "${COLOR_RED}❌ Error: Neither '$HDB_CLIENT_PATH_1' nor '$HDB_CLIENT_PATH_2' found.${COLOR_NC}"
    echo -e "${COLOR_RED}Please install the SAP HANA client or adjust the paths in the script.${COLOR_NC}"
    exit 1
fi

HDB_USERSTORE_EXEC="${HDB_CLIENT_PATH}/hdbuserstore"
HDB_SQL_EXEC="${HDB_CLIENT_PATH}/hdbsql"
||||||
|
# --- Function: Test Key Connection ---
# Validate that a stored hdbuserstore key can actually open a connection.
# @param $1: The key name to test.
# @return: 0 for success, 1 for failure.
test_key() {
    local key=$1

    if [ -z "$key" ]; then
        echo -e "${COLOR_RED} ❌ Error: No key name provided for testing.${COLOR_NC}"
        return 1
    fi

    echo -e "\n${COLOR_YELLOW}🧪 Testing connection for key '${key}'...${COLOR_NC}"

    # Lightweight probe query; stdout and stderr are both captured so the
    # error details can be shown on failure.
    test_output=$("$HDB_SQL_EXEC" -U "$key" "SELECT 'Connection successful' FROM DUMMY" 2>&1)
    local rc=$?

    # Failure when hdbsql exits non-zero OR the marker row is missing.
    if [ $rc -ne 0 ] || [[ "$test_output" != *"Connection successful"* ]]; then
        echo -e "${COLOR_RED} ❌ Connection test failed for key '${key}'.${COLOR_NC}"
        echo -e "${COLOR_RED} Error details:${COLOR_NC}"
        # Indent the error message for better readability.
        echo "$test_output" | sed 's/^/ /'
        return 1
    fi

    echo -e "${COLOR_GREEN} ✅ Connection test successful!${COLOR_NC}"
    return 0
}
|
||||||
|
|
||||||
|
# --- Function: Create New Key ---
# Interactively collect connection details, store a new hdbuserstore key,
# and immediately test it; if the test fails, the key is deleted (rollback).
create_new_key() {
    current_hostname=$(hostname)

    echo -e "\n${COLOR_BLUE}🔑 --- Create New Secure Key ---${COLOR_NC}"
    read -p "Enter the Key Name [CRONKEY]: " key_name
    read -p "Enter the HANA Host [${current_hostname}]: " hdb_host
    read -p "Enter the Instance Number [00]: " hdb_instance

    # Ask if connecting to SYSTEMDB to format the connection string correctly.
    read -p "Is this for the SYSTEMDB tenant? (y/n) [n]: " is_systemdb

    # Apply defaults for anything left blank.
    key_name=${key_name:-"CRONKEY"}
    hdb_host=${hdb_host:-$current_hostname}
    hdb_instance=${hdb_instance:-"00"}
    is_systemdb=${is_systemdb:-"n"}

    # SYSTEMDB listens on port 3<nr>13 and takes no tenant suffix;
    # tenants use 3<nr>15@<tenant>.
    if [[ "$is_systemdb" =~ ^[Yy]$ ]]; then
        CONNECTION_STRING="${hdb_host}:3${hdb_instance}13"
        echo -e "${COLOR_YELLOW}💡 Connecting to SYSTEMDB. Tenant name will be omitted from the connection string.${COLOR_NC}"
    else
        read -p "Enter the Tenant DB [NDB]: " hdb_tenant
        hdb_tenant=${hdb_tenant:-"NDB"}
        CONNECTION_STRING="${hdb_host}:3${hdb_instance}15@${hdb_tenant}"
    fi

    read -p "Enter the Database User [SYSTEM]: " hdb_user
    read -sp "Enter the Database Password: " hdb_pass
    echo ""

    hdb_user=${hdb_user:-"SYSTEM"}

    echo -e "\n${COLOR_YELLOW}📝 Review the command below (password is hidden):"
    echo "------------------------------------------------------"
    # Fixed format string (SC2059): the original expanded ${HDB_USERSTORE_EXEC}
    # inside the printf format itself, so a '%' in the path would have been
    # interpreted as a format directive.
    printf '%s SET "%s" "%s" "%s" "<password>"\n' "$HDB_USERSTORE_EXEC" "$key_name" "$CONNECTION_STRING" "$hdb_user"
    echo -e "------------------------------------------------------${COLOR_NC}"

    read -p "❓ Execute this command? (y/n): " execute_now
    if [[ "$execute_now" =~ ^[Yy]$ ]]; then
        echo -e "\n${COLOR_GREEN}⚙️ Executing command...${COLOR_NC}"
        # Create the key first
        if "$HDB_USERSTORE_EXEC" SET "$key_name" "$CONNECTION_STRING" "$hdb_user" "$hdb_pass"; then
            echo -e "${COLOR_GREEN} ✅ Success! Key '${key_name}' stored locally.${COLOR_NC}"

            # Immediately test the new key
            if ! test_key "$key_name"; then
                # If the test fails, roll back by deleting the key.
                # (Message also fixes a mis-encoded character in the original.)
                echo -e "\n${COLOR_YELLOW}🔄 Rolling back: Deleting the newly created key '${key_name}' due to connection failure.${COLOR_NC}"
                if "$HDB_USERSTORE_EXEC" DELETE "$key_name"; then
                    echo -e "${COLOR_GREEN} ✅ Key '${key_name}' successfully deleted.${COLOR_NC}"
                else
                    echo -e "${COLOR_RED} ❌ Error: Failed to automatically delete the key '${key_name}'. Please remove it manually.${COLOR_NC}"
                fi
            fi
        else
            echo -e "${COLOR_RED} ❌ Error: Failed to store key '${key_name}'. Please check details and credentials.${COLOR_NC}"
        fi
    else
        echo -e "\n${COLOR_YELLOW}🛑 Execution aborted by user.${COLOR_NC}"
    fi

    # Don't keep the clear-text password in the shell environment.
    unset hdb_pass
}
|
||||||
|
|
||||||
|
# --- Function: Delete Key ---
# List stored keys, let the user pick one, and delete it after confirmation.
delete_key() {
    echo -e "\n${COLOR_BLUE}🗑️ --- Delete Existing Secure Key ---${COLOR_NC}"

    # Key names are in column 2 of lines that start with "KEY ".
    keys=$("$HDB_USERSTORE_EXEC" list 2>/dev/null | tail -n +3 | grep '^KEY ' | awk '{print $2}')
    if [ -z "$keys" ]; then
        echo -e "${COLOR_YELLOW}🤷 No keys found to delete.${COLOR_NC}"
        return
    fi

    PS3=$'\nPlease select a key to delete (or Ctrl+C to cancel): '
    select key_to_delete in $keys; do
        # Guard clause: invalid choice -> re-prompt.
        if [ -z "$key_to_delete" ]; then
            echo -e "${COLOR_RED}❌ Invalid selection. Try again.${COLOR_NC}"
            continue
        fi
        read -p "❓ PERMANENTLY delete the key '$key_to_delete'? (y/n): " confirm
        if [[ "$confirm" =~ ^[Yy]$ ]]; then
            echo -e "\n${COLOR_GREEN}⚙️ Deleting key '$key_to_delete'...${COLOR_NC}"
            if "$HDB_USERSTORE_EXEC" DELETE "$key_to_delete"; then
                echo -e "${COLOR_GREEN} ✅ Success! Key '$key_to_delete' has been deleted.${COLOR_NC}"
            else
                echo -e "${COLOR_RED} ❌ Error: Failed to delete the key.${COLOR_NC}"
            fi
        else
            echo -e "\n${COLOR_YELLOW}🛑 Deletion aborted by user.${COLOR_NC}"
        fi
        break
    done
}
|
||||||
|
|
||||||
|
# --- Function: List and Test a Key ---
# List stored keys, let the user pick one, and run a connection test on it.
list_and_test_key() {
    echo -e "\n${COLOR_BLUE}🧪 --- Test an Existing Secure Key ---${COLOR_NC}"

    # Key names are in column 2 of lines that start with "KEY ".
    keys=$("$HDB_USERSTORE_EXEC" list 2>/dev/null | tail -n +3 | grep '^KEY ' | awk '{print $2}')
    if [ -z "$keys" ]; then
        echo -e "${COLOR_YELLOW}🤷 No keys found to test.${COLOR_NC}"
        return
    fi

    PS3=$'\nPlease select a key to test (or Ctrl+C to cancel): '
    select key_to_test in $keys; do
        # Guard clause: invalid choice -> re-prompt.
        if [ -z "$key_to_test" ]; then
            echo -e "${COLOR_RED}❌ Invalid selection. Try again.${COLOR_NC}"
            continue
        fi
        test_key "$key_to_test"
        break
    done
}
|
||||||
|
|
||||||
|
|
||||||
|
# --- Main Menu ---
# Loop forever until the user picks "Exit"; each action returns here.
while true; do
    echo -e "\n${COLOR_BLUE}🔐 ========== SAP HANA Secure User Store Key Manager ==========${COLOR_NC}"
    echo "1) Create a New Key"
    echo "2) Delete an Existing Key"
    echo "3) Test an Existing Key"
    echo "4) Exit"

    read -p $'\nPlease select an option: ' choice

    case $choice in
        1) create_new_key ;;
        2) delete_key ;;
        3) list_and_test_key ;;
        4) echo "👋 Exiting."; exit 0 ;;
        *) echo -e "${COLOR_RED}❌ Invalid option '$choice'. Please try again.${COLOR_NC}" ;;
    esac
done
|
||||||
|
|
||||||
244
templates/monitor.sh
Normal file
244
templates/monitor.sh
Normal file
@@ -0,0 +1,244 @@
|
|||||||
|
#!/bin/bash
# Version: 1.3.1
# Author: Tomi Eckert
# =============================================================================
# SAP HANA Monitoring Script
#
# Checks HANA processes, disk usage, log segments, and statement queue.
# Sends ntfy.sh notifications if thresholds are exceeded.
# =============================================================================

# --- Lock File Implementation ---
# Create the lock file atomically with noclobber: the original separate
# "exists?" check + touch allowed two instances starting at the same moment
# to both pass the check (TOCTOU race).
LOCK_FILE="/tmp/hana_monitor.lock"
if ! (set -o noclobber; : > "$LOCK_FILE") 2>/dev/null; then
    echo "▶️ Script is already running. Exiting."
    exit 1
fi
# Ensure lock file is removed on script exit (covers every exit path below,
# so no explicit rm is needed before the error exits).
trap 'rm -f "$LOCK_FILE"' EXIT

# --- Configuration and Setup ---
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)"
CONFIG_FILE="${SCRIPT_DIR}/monitor.conf"

# The config defines thresholds, paths, and ntfy credentials; nothing can
# run without it.
if [ ! -f "$CONFIG_FILE" ]; then
    echo "❌ Error: Configuration file not found at ${CONFIG_FILE}" >&2
    exit 1
fi
source "$CONFIG_FILE"

# Per-check state lives here so notifications fire only on state changes.
STATE_DIR="${SCRIPT_DIR}/monitor_state"
mkdir -p "${STATE_DIR}"
|
||||||
|
|
||||||
|
# Helper functions for state management
# Print the persisted value for state key $1; prints an empty line when no
# state has been recorded yet.
get_state() {
    local state_file="${STATE_DIR}/$1.state"
    if [ -f "$state_file" ]; then
        cat "$state_file"
    else
        echo ""
    fi
}
|
||||||
|
|
||||||
|
# Persist value $2 under state key $1 (one small file per key).
# printf instead of echo: echo mangles values that begin with '-' (parsed
# as echo options) or contain backslash escapes in some shells.
set_state() {
    local key="$1"
    local value="$2"
    printf '%s\n' "$value" > "${STATE_DIR}/${key}.state"
}
|
||||||
|
|
||||||
|
# Log-segment summary query (count grouped by host, service, and state),
# plus the cached host name used in notification messages.
SQL_QUERY="SELECT b.host, b.service_name, a.state, count(*) FROM PUBLIC.M_LOG_SEGMENTS a JOIN PUBLIC.M_SERVICES b ON (a.host = b.host AND a.port = b.port) GROUP BY b.host, b.service_name, a.state;"
HOSTNAME="$(hostname)"
|
||||||
|
|
||||||
|
# Send an ntfy notification for an alert key, but only when its state value
# changed since the previous run (edge-triggered alerting).
#   $1 alert_key           state-file key
#   $2 title_prefix        e.g. "HANA Process"
#   $3 current_message     human-readable detail text
#   $4 is_alert_condition  "true" or "false"
#   $5 current_value       value persisted as state (e.g. "85%", "OK")
send_notification_if_changed() {
    local alert_key="$1"
    local title_prefix="$2"
    local current_message="$3"
    local is_alert_condition="$4"
    local current_value="$5"

    local previous_value
    previous_value=$(get_state "${alert_key}")

    # Unchanged state -> nothing to do (state is left untouched).
    if [ "$current_value" == "$previous_value" ]; then
        return
    fi

    local full_title=""
    local full_message=""
    if [ "$is_alert_condition" == "true" ]; then
        full_title="${title_prefix} Alert"
        full_message="🚨 Critical: ${current_message}"
    elif [ -n "$previous_value" ] && [ "$previous_value" != "OK" ]; then
        # Previously alerting, now healthy -> announce the recovery.
        full_title="${title_prefix} Resolved"
        full_message="✅ Resolved: ${current_message}"
    else
        # Healthy and was healthy: record the new value silently.
        set_state "${alert_key}" "$current_value"
        return
    fi

    local final_message="[${COMPANY_NAME} | ${HOSTNAME}] ${full_message}"
    curl -H "Authorization: Bearer ${NTFY_TOKEN}" -H "Title: ${full_title}" -d "${final_message}" "${NTFY_TOPIC_URL}" > /dev/null 2>&1
    set_state "${alert_key}" "$current_value"
    echo "🔔 Notification sent for ${alert_key}: ${full_message}"
}
|
||||||
|
|
||||||
|
# --- HANA Process Status ---
# Every sapcontrol process line should report GREEN; anything else aborts
# the run (the remaining checks would likely fail anyway).
echo "⚙️ Checking HANA process status..."
if [ ! -x "$SAPCONTROL_PATH" ]; then
    echo "❌ Error: sapcontrol not found or not executable at ${SAPCONTROL_PATH}" >&2
    send_notification_if_changed "hana_sapcontrol_path" "HANA Monitor Error" "sapcontrol not found or not executable at ${SAPCONTROL_PATH}" "true" "SAPCONTROL_ERROR"
    exit 1
fi

# GetProcessList output: skip the header lines, keep any non-GREEN process.
non_green_processes=$("${SAPCONTROL_PATH}" -nr "${HANA_INSTANCE_NR}" -function GetProcessList | tail -n +6 | grep -v 'GREEN')

if [ -z "$non_green_processes" ]; then
    send_notification_if_changed "hana_processes" "HANA Process" "All HANA processes are GREEN." "false" "OK"
    echo "✅ Success! All HANA processes are GREEN."
else
    echo "🚨 Alert: One or more HANA processes are not running!" >&2
    echo "$non_green_processes" >&2
    send_notification_if_changed "hana_processes" "HANA Process" "One or more HANA processes are not GREEN. Problem processes: ${non_green_processes}" "true" "PROCESS_ALERT:${non_green_processes}"
    exit 1 # Exit early as other checks might fail
fi
|
||||||
|
|
||||||
|
# --- Disk Space Monitoring ---
# df's "Use%" column is always an integer, so plain shell arithmetic is
# sufficient; this removes the original (fragile, unnecessary) bc dependency.
echo "ℹ️ Checking disk usage..."
for dir in "${DIRECTORIES_TO_MONITOR[@]}"; do
    if [ ! -d "$dir" ]; then
        echo "⚠️ Warning: Directory '$dir' not found. Skipping." >&2
        send_notification_if_changed "disk_dir_not_found_${dir//\//_}" "HANA Disk Warning" "Directory '$dir' not found." "true" "DIR_NOT_FOUND"
        continue
    fi
    # Second df line, 5th column, '%' stripped -> integer usage.
    usage=$(df -h "$dir" | awk 'NR==2 {print $5}' | sed 's/%//')
    echo " - ${dir} is at ${usage}%"
    if (( usage > DISK_USAGE_THRESHOLD )); then
        echo "🚨 Alert: ${dir} usage is at ${usage}% which is above the ${DISK_USAGE_THRESHOLD}% threshold." >&2
        send_notification_if_changed "disk_usage_${dir//\//_}" "HANA Disk" "Disk usage for ${dir} is at ${usage}%." "true" "${usage}%"
    else
        send_notification_if_changed "disk_usage_${dir//\//_}" "HANA Disk" "Disk usage for ${dir} is at ${usage}% (below threshold)." "false" "OK"
    fi
done
|
||||||
|
|
||||||
|
# --- HANA Log Segment Monitoring ---
echo "⚙️ Executing HANA SQL query..."
if [ ! -x "$HDBSQL_PATH" ]; then
    echo "❌ Error: hdbsql not found or not executable at ${HDBSQL_PATH}" >&2
    send_notification_if_changed "hana_hdbsql_path" "HANA Monitor Error" "hdbsql not found or not executable at ${HDBSQL_PATH}" "true" "HDBSQL_ERROR"
    exit 1
fi
# Capture output in a variable first so hdbsql's exit status can be checked.
# The original used `readarray < <(cmd)` and then tested $?, which is the
# status of readarray (always 0) -- hdbsql failures were never detected.
sql_output_raw=$("$HDBSQL_PATH" -U "$HANA_USER_KEY" -c ";" "$SQL_QUERY" 2>&1)
hdbsql_rc=$?
readarray -t sql_output <<< "$sql_output_raw"
if [ $hdbsql_rc -ne 0 ]; then
    echo "❌ Failure! The hdbsql command failed. Please check logs." >&2
    error_message=$(printf '%s\n' "${sql_output[@]}")
    send_notification_if_changed "hana_hdbsql_command" "HANA Monitor Error" "The hdbsql command failed. Details: ${error_message}" "true" "HDBSQL_COMMAND_FAILED"
    exit 1
fi
|
||||||
|
|
||||||
|
# Tally log segments per state from the hdbsql CSV output:
# "host","service","state",count -- quotes stripped, header/blank lines skipped.
total_segments=0
truncated_segments=0
free_segments=0
for line in "${sql_output[@]}"; do
    [[ -z "$line" || "$line" == *"STATE"* ]] && continue
    cleaned_line=${line//\"/}
    state=$(echo "$cleaned_line" | awk -F',' '{print $3}')
    count=$(echo "$cleaned_line" | awk -F',' '{print $4}')
    total_segments=$((total_segments + count))
    case "$state" in
        Truncated) truncated_segments=$((truncated_segments + count)) ;;
        Free)      free_segments=$((free_segments + count)) ;;
    esac
done
|
||||||
|
|
||||||
|
echo "ℹ️ Total Segments: ${total_segments}"
echo "ℹ️ Truncated Segments: ${truncated_segments}"
echo "ℹ️ Free Segments: ${free_segments}"

if [ $total_segments -eq 0 ]; then
    # Guard against division by zero below.
    echo "⚠️ Warning: No log segments found. Skipping percentage checks." >&2
    send_notification_if_changed "hana_log_segments_total" "HANA Log Segment Warning" "No log segments found. Skipping percentage checks." "true" "NO_LOG_SEGMENTS"
else
    send_notification_if_changed "hana_log_segments_total" "HANA Log Segment" "Log segments found." "false" "OK"

    # Integer percentages: shell arithmetic replaces the original bc calls
    # (both operands are integers, and this drops the bc dependency).
    truncated_percentage=$((truncated_segments * 100 / total_segments))
    if (( truncated_percentage > TRUNCATED_PERCENTAGE_THRESHOLD )); then
        echo "🚨 Alert: ${truncated_percentage}% of log segments are 'Truncated'." >&2
        send_notification_if_changed "hana_log_truncated" "HANA Log Segment" "${truncated_percentage}% of HANA log segments are in 'Truncated' state." "true" "${truncated_percentage}%"
    else
        send_notification_if_changed "hana_log_truncated" "HANA Log Segment" "${truncated_percentage}% of HANA log segments are in 'Truncated' state (below threshold)." "false" "OK"
    fi

    free_percentage=$((free_segments * 100 / total_segments))
    if (( free_percentage < FREE_PERCENTAGE_THRESHOLD )); then
        echo "🚨 Alert: Only ${free_percentage}% of log segments are 'Free'." >&2
        send_notification_if_changed "hana_log_free" "HANA Log Segment" "Only ${free_percentage}% of HANA log segments are in 'Free' state." "true" "${free_percentage}%"
    else
        send_notification_if_changed "hana_log_free" "HANA Log Segment" "Only ${free_percentage}% of HANA log segments are in 'Free' state (above threshold)." "false" "OK"
    fi
fi
||||||
|
|
||||||
|
# --- HANA Statement Queue Monitoring ---
# Alerts only after the queue exceeds the threshold for N consecutive runs,
# tracked via a persisted breach counter (avoids flapping on short spikes).
echo "⚙️ Checking HANA statement queue..."
STATEMENT_QUEUE_SQL="SELECT COUNT(*) FROM M_SERVICE_THREADS WHERE THREAD_TYPE = 'SqlExecutor' AND THREAD_STATE = 'Queueing';"
queue_count=$("$HDBSQL_PATH" -U "$HANA_USER_KEY" -j -a -x "$STATEMENT_QUEUE_SQL" 2>/dev/null | tr -d '"')

if [[ "$queue_count" =~ ^[0-9]+$ ]]; then
    send_notification_if_changed "hana_statement_queue_check_fail" "HANA Monitor Warning" "Statement queue check is working." "false" "OK"
    echo "ℹ️ Current statement queue length: ${queue_count}"

    breach_count=$(get_state "statement_queue_breach_count")
    breach_count=${breach_count:-0}

    if (( queue_count > STATEMENT_QUEUE_THRESHOLD )); then
        breach_count=$((breach_count + 1))
        echo "📈 Statement queue is above threshold. Consecutive breach count: ${breach_count}/${STATEMENT_QUEUE_CONSECUTIVE_RUNS}."
    else
        # Any run below the threshold resets the streak.
        breach_count=0
    fi
    set_state "statement_queue_breach_count" "$breach_count"

    if (( breach_count >= STATEMENT_QUEUE_CONSECUTIVE_RUNS )); then
        message="Statement queue has been over ${STATEMENT_QUEUE_THRESHOLD} for ${breach_count} checks. Current count: ${queue_count}."
        send_notification_if_changed "hana_statement_queue_status" "HANA Statement Queue" "${message}" "true" "ALERT:${queue_count}"
    else
        message="Statement queue is normal. Current count: ${queue_count}."
        send_notification_if_changed "hana_statement_queue_status" "HANA Statement Queue" "${message}" "false" "OK"
    fi
else
    echo "⚠️ Warning: Could not retrieve HANA statement queue count. Skipping check." >&2
    send_notification_if_changed "hana_statement_queue_check_fail" "HANA Monitor Warning" "Could not retrieve statement queue count." "true" "QUEUE_CHECK_FAIL"
fi
|
||||||
|
|
||||||
|
|
||||||
|
# --- HANA Backup Status Monitoring ---
echo "ℹ️ Checking last successful data backup status..."
# Timestamp of the most recent successful complete data backup; quotes and
# fractional seconds are stripped so date(1) can parse it.
last_backup_date=$("$HDBSQL_PATH" -U "$HANA_USER_KEY" -j -a -x \
    "SELECT TOP 1 SYS_START_TIME FROM M_BACKUP_CATALOG WHERE ENTRY_TYPE_NAME = 'complete data backup' AND STATE_NAME = 'successful' ORDER BY SYS_START_TIME DESC" 2>/dev/null | tr -d "\"" | sed 's/\..*//')

if [[ -n "$last_backup_date" ]]; then
    # Compare backup age against the configured threshold (in hours).
    last_backup_epoch=$(date -d "$last_backup_date" +%s)
    current_epoch=$(date +%s)
    threshold_seconds=$((BACKUP_THRESHOLD_HOURS * 3600))
    age_seconds=$((current_epoch - last_backup_epoch))
    age_hours=$((age_seconds / 3600))

    if (( age_seconds > threshold_seconds )); then
        message="Last successful HANA backup for ${COMPANY_NAME} is ${age_hours} hours old, which exceeds the threshold of ${BACKUP_THRESHOLD_HOURS} hours. Last backup was on: ${last_backup_date}."
        echo "🚨 Critical: ${message}"
        send_notification_if_changed "hana_backup_status" "HANA Backup" "${message}" "true" "${age_hours}h"
    else
        message="Last successful backup is ${age_hours} hours old (Threshold: ${BACKUP_THRESHOLD_HOURS} hours)."
        echo "✅ Success! ${message}"
        send_notification_if_changed "hana_backup_status" "HANA Backup" "${message}" "false" "OK"
    fi
else
    message="No successful complete data backup found for ${COMPANY_NAME} HANA."
    echo "🚨 Critical: ${message}"
    send_notification_if_changed "hana_backup_status" "HANA Backup" "${message}" "true" "NO_BACKUP"
fi

echo "✅ Success! HANA monitoring check complete."
|
||||||
Reference in New Issue
Block a user