diff --git a/cluster/cluster.go b/cluster/cluster.go
index 116e74a49..5c8662fef 100644
--- a/cluster/cluster.go
+++ b/cluster/cluster.go
@@ -39,7 +39,7 @@ import (
 	"github.com/signal18/replication-manager/router/maxscale"
 	"github.com/signal18/replication-manager/utils/alert/mailer"
 	"github.com/signal18/replication-manager/utils/alert/slackman"
-	"github.com/signal18/replication-manager/utils/archiver"
+	"github.com/signal18/replication-manager/utils/backupmgr"
 	"github.com/signal18/replication-manager/utils/cron"
 	"github.com/signal18/replication-manager/utils/dbhelper"
 	"github.com/signal18/replication-manager/utils/logrus/hooks/pushover"
@@ -206,7 +206,7 @@ type Cluster struct {
 	QueryRules    map[uint32]config.QueryRule `json:"-"`
 	Backups       []v3.Backup                 `json:"-"`
 	BackupStat    v3.BackupStat               `json:"backupStat" groups:"web"`
-	BackupMetaMap *config.BackupMetaMap       `json:"backupList" groups:"web"`
+	BackupMetaMap *backupmgr.BackupMetaMap    `json:"backupList" groups:"web"`
 	SLAHistory    []state.Sla                 `json:"slaHistory" groups:"web"`
 	APIUsers      map[string]APIUser          `json:"apiUsers" groups:"web"`
 	Schedule      map[string]cron.Entry       `json:"-"`
@@ -245,7 +245,8 @@ type Cluster struct {
 	InResticBackup   bool           `json:"inResticBackup" groups:"web"`
 	InRollingRestart bool           `json:"inRollingRestart" groups:"web"`
 	Mailer           *mailer.Mailer `json:"-"`
-	ResticRepo       *archiver.ResticRepo     `json:"-"`
+	ResticManager    *backupmgr.ResticManager `json:"-"`
+	MessageChan      chan s18log.HttpMessage  `json:"-"`
 	ErrorConfigs     config.ErrorConfigs      `json:"-"` //To store error config
 	Partner          *config.Partner          `json:"partner" groups:"web"`
 	ConfigManager    *manager.ConfigManager   `json:"-"`
@@ -371,6 +372,9 @@ func (cluster *Cluster) Init(confs *config.ConfVersion, cfgGroup string, tlog *s
 	cluster.AgentMaxFreq = make(map[string]int64)
 	cluster.ServiceTemplates = make([]string, 0)
 	cluster.OpenSVCStats.Store([]opensvc.DaemonNodeStats{})
+	cluster.MessageChan = make(chan s18log.HttpMessage, 10)
+
+	go cluster.ConsumeMessageChan()
 
 	*cluster.Conf = confs.ConfInit
 
@@ -410,7 +414,7 @@ func (cluster *Cluster) InitFromConf() {
 	cluster.runOnceAfterTopology = true
 	cluster.testStopCluster = true
 	cluster.testStartCluster = true
-	cluster.BackupMetaMap = config.NewBackupMetaMap()
+	cluster.BackupMetaMap = backupmgr.NewBackupMetaMap()
 	cluster.VersionsMap = config.NewVersionsMap()
 	cluster.WorkingDir = cluster.Conf.WorkingDir + "/" + cluster.Name
 
@@ -585,7 +589,7 @@ func (cluster *Cluster) InitFromConf() {
 	cluster.initScheduler()
 	cluster.CheckDefaultUser(true)
 	cluster.RefreshToolVersions()
-	cluster.StartResticRepo()
+	cluster.StartResticManager()
 	cluster.Conf.TopologyTarget = cluster.GetTopologyFromConf()
 }
 
@@ -979,8 +983,8 @@ func (cluster *Cluster) StateProcessing() {
 func (cluster *Cluster) Stop() {
 	cluster.Lock()
 	defer cluster.Unlock()
-	if cluster.ResticRepo != nil {
-		cluster.ResticRepo.ShutdownWorker()
+	if cluster.ResticManager != nil {
+		cluster.ResticManager.ShutdownWorker()
 	}
 	cluster.CloseRefreshTemplateMD5Worker()
 	cluster.ConfigManager.SaveConfig(cluster, true)
@@ -1757,7 +1761,7 @@ func (cluster *Cluster) MonitorSchema() {
 	cluster.WorkLoad.DBIndexSize = totindexsize
 	cluster.WorkLoad.DBTableSize = tottablesize
 
-	cmaster.DictTables = config.FromNormalTablesMap(cmaster.DictTables, tables)
+	cmaster.DictTables = dbhelper.FromNormalTablesMap(cmaster.DictTables, tables)
 	cluster.StateMachine.RemoveMonitorSchemaState()
 }
 
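Reviewer note: the new MessageChan decouples backupmgr workers from the cluster logger. Producers send s18log.HttpMessage values into a buffered channel and a single goroutine (ConsumeMessageChan, added in cluster_log.go below) drains it into LogModulePrintf. A minimal sketch of the pattern, with the HttpMessage fields assumed from this diff:

package main

import "fmt"

// HttpMessage mirrors the fields ConsumeMessageChan reads (assumption from this diff).
type HttpMessage struct {
	Module int
	Level  string
	Text   string
}

func main() {
	ch := make(chan HttpMessage, 10) // buffered so senders rarely block

	done := make(chan struct{})
	go func() { // consumer, analogous to ConsumeMessageChan
		for msg := range ch { // loop exits once ch is closed
			fmt.Printf("[mod=%d level=%s] %s\n", msg.Module, msg.Level, msg.Text)
		}
		close(done)
	}()

	ch <- HttpMessage{Module: 18, Level: "INFO", Text: "restic fetch queued"}
	close(ch) // close on shutdown so the consumer goroutine terminates
	<-done
}
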
diff --git a/cluster/cluster_acl.go b/cluster/cluster_acl.go
index 8b95568a0..afaaa52af 100644
--- a/cluster/cluster_acl.go
+++ b/cluster/cluster_acl.go
@@ -638,25 +638,20 @@ func (cluster *Cluster) IsURLPassACL(strUser string, URL string, errorPrint bool
 			return true
 		}
 	}
-	if cluster.APIUsers[strUser].Grants[config.GrantClusterShowBackups] {
-		if strings.Contains(URL, "/api/clusters/"+cluster.Name+"/backups") {
-			return true
-		}
-	}
 	if cluster.APIUsers[strUser].Grants[config.GrantClusterShowBackups] {
 		if strings.Contains(URL, "/api/clusters/"+cluster.Name+"/backups") {
 			return true
 		}
-		if strings.HasSuffix(strings.TrimSuffix(URL, "/"), "/api/clusters/"+cluster.Name+"/archives") {
+		if strings.HasSuffix(strings.TrimSuffix(URL, "/"), "/api/clusters/"+cluster.Name+"/restic/snapshots") {
 			return true
 		}
-		if strings.HasSuffix(strings.TrimSuffix(URL, "/"), "/api/clusters/"+cluster.Name+"/archives/stats") {
+		if strings.HasSuffix(strings.TrimSuffix(URL, "/"), "/api/clusters/"+cluster.Name+"/restic/stats") {
 			return true
 		}
 	}
 	if cluster.APIUsers[strUser].Grants[config.GrantClusterProcess] {
-		if strings.Contains(URL, "/api/clusters/"+cluster.Name+"/archives") {
+		if strings.Contains(URL, "/api/clusters/"+cluster.Name+"/restic") {
 			return true
 		}
 	}
diff --git a/cluster/cluster_bash.go b/cluster/cluster_bash.go
index c44cc2636..9c330a75e 100644
--- a/cluster/cluster_bash.go
+++ b/cluster/cluster_bash.go
@@ -16,6 +16,7 @@ import (
 
 	"github.com/signal18/replication-manager/config"
 	"github.com/signal18/replication-manager/utils/alert"
+	"github.com/signal18/replication-manager/utils/backupmgr"
 	"github.com/signal18/replication-manager/utils/state"
 )
 
@@ -227,9 +228,9 @@ func (cluster *Cluster) BinlogCopyScript(server *ServerMonitor, binlog string, i
 	return nil
 }
 
-func (cluster *Cluster) BackupPostScript(server *ServerMonitor, backtype config.BackupMethod, filepath string) error {
+func (cluster *Cluster) BackupPostScript(server *ServerMonitor, backtype backupmgr.BackupMethod, filepath string) error {
 	switch backtype {
-	case config.BackupMethodLogical:
+	case backupmgr.BackupMethodLogical:
 		if cluster.Conf.BackupLogicalPostScript != "" {
 			cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModGeneral, config.LvlInfo, "Calling backup logical post script at %s", cluster.Conf.BackupLogicalPostScript)
 			var out []byte
@@ -241,7 +242,7 @@ func (cluster *Cluster) BackupPostScript(server *ServerMonitor, backtype config.
 			cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModGeneral, "INFO", "Backup logical post script done: %s", string(out))
 		}
-	case config.BackupMethodPhysical:
+	case backupmgr.BackupMethodPhysical:
 		if cluster.Conf.BackupPhysicalPostScript != "" {
 			cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModGeneral, config.LvlInfo, "Calling backup physical post script at %s", cluster.Conf.BackupPhysicalPostScript)
 			var out []byte
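Reviewer note: BackupPostScript now switches on backupmgr.BackupMethod instead of config.BackupMethod. A minimal sketch of the typed-enum dispatch this relies on; the hook name runPostScript is illustrative, not the repmanager API:

package main

import "fmt"

// BackupMethod mirrors the constants moved into backupmgr by this diff.
type BackupMethod int

const (
	BackupMethodLogical  BackupMethod = 1
	BackupMethodPhysical BackupMethod = 2
)

// runPostScript is a hypothetical stand-in for the per-method post-backup hook.
func runPostScript(m BackupMethod, path string) error {
	switch m {
	case BackupMethodLogical:
		fmt.Println("logical post script for", path)
	case BackupMethodPhysical:
		fmt.Println("physical post script for", path)
	default:
		return fmt.Errorf("unknown backup method %d", m)
	}
	return nil
}

func main() {
	_ = runPostScript(BackupMethodLogical, "/backups/mysqldump.sql.gz")
}
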
diff --git a/cluster/cluster_bck.go b/cluster/cluster_bck.go
index ce4ea2457..0283baf16 100644
--- a/cluster/cluster_bck.go
+++ b/cluster/cluster_bck.go
@@ -15,12 +15,11 @@ import (
 	"github.com/dustin/go-humanize"
 	"github.com/shirou/gopsutil/disk"
 	"github.com/signal18/replication-manager/config"
-	"github.com/signal18/replication-manager/utils/archiver"
+	"github.com/signal18/replication-manager/utils/backupmgr"
 	"github.com/signal18/replication-manager/utils/dbhelper"
 	"github.com/signal18/replication-manager/utils/misc"
 	"github.com/signal18/replication-manager/utils/state"
 	"github.com/signal18/replication-manager/utils/version"
-	"github.com/sirupsen/logrus"
 )
 
 func (cluster *Cluster) ResticGetEnv() []string {
@@ -53,17 +52,54 @@ func (cluster *Cluster) CheckResticInstallation() {
 	}
 }
 
-func (cluster *Cluster) StartResticRepo() error {
+func (cluster *Cluster) CheckResticErrors() {
 	if !cluster.Conf.BackupRestic {
-		return nil
+		return
+	}
+
+	if cluster.ResticManager == nil {
+		cluster.StartResticManager()
+	}
+
+	// If the repo cannot be initialized, all other errors are irrelevant, so we only fetch the init repo error
+	if !cluster.ResticManager.CanInitRepo && cluster.ResticManager.HasAnyError() {
+		err := cluster.ResticManager.FetchAndClearError(backupmgr.InitTask)
+		cluster.SetState("WARN0095", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0095"], err), ErrFrom: "BACKUP"})
+		return
+	}
+
+	for task, err := range cluster.ResticManager.FetchAndClearErrors() {
+		switch task {
+		case backupmgr.FetchTask:
+			cluster.SetState("WARN0093", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0093"], err), ErrFrom: "BACKUP"})
+		case backupmgr.PurgeTask:
+			cluster.SetState("WARN0094", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0094"], err), ErrFrom: "BACKUP"})
+		case backupmgr.UnlockTask:
+			cluster.SetState("WARN0095", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0095"], err), ErrFrom: "BACKUP"})
+		default:
+			cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModGeneral, config.LvlErr, "Unknown restic task error: %s", err)
+		}
 	}
-	var loglevel logrus.Level
-	if cluster.Conf.LogArchiveLevel > 0 {
-		loglevel = config.ToLogrusLevel(cluster.Conf.LogArchiveLevel)
+}
+
+func (cluster *Cluster) CheckResticConfigBackup() {
+	if !cluster.Conf.BackupRestic {
+		return
+	}
+
+	if err := cluster.BackupResticConfig(); err != nil {
+		cluster.SetState("WARN0145", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0145"], err), ErrFrom: "BACKUP"})
+	}
+}
+
+func (cluster *Cluster) StartResticManager() error {
+	if !cluster.Conf.BackupRestic {
+		return nil
 	}
 
-	cluster.ResticRepo = archiver.NewResticRepo(cluster.Conf.BackupResticBinaryPath, cluster.Logrus, logrus.Fields{"cluster": cluster.Name, "type": "log", "module": "restic"}, loglevel)
+	cluster.ResticManager = backupmgr.NewResticRepo(cluster.Conf.BackupResticBinaryPath, cluster.MessageChan, config.ConstLogModRestic)
+	cluster.ResticManager.SetEnv(cluster.ResticGetEnv())
 
 	go cluster.ResticFetchRepo()
 
 	return nil
 }
 
@@ -73,8 +109,11 @@ func (cluster *Cluster) ResticInitRepo(force bool) error {
 		return nil
 	}
 
-	cluster.ResticRepo.SetEnv(cluster.ResticGetEnv())
-	err := cluster.ResticRepo.ResticInitRepo(force)
+	if cluster.ResticManager == nil {
+		cluster.StartResticManager()
+	}
+
+	err := cluster.ResticManager.InitRepo(force)
 	if err != nil {
 		cluster.SetState("WARN0092", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0092"], err), ErrFrom: "BACKUP"})
 	}
@@ -82,6 +121,25 @@ func (cluster *Cluster) ResticInitRepo(force bool) error {
 	return err
 }
 
+func (cluster *Cluster) AddPurgeTask(snapshotID string) error {
+	if !cluster.Conf.BackupRestic {
+		return fmt.Errorf("Restic backup is not enabled")
+	}
+
+	if cluster.ResticManager == nil {
+		cluster.StartResticManager()
+	}
+
+	if snapshotID == "" {
+		return fmt.Errorf("Unable to purge single snapshot: snapshot ID is empty")
+	}
+
+	cluster.ResticManager.AddPurgeTask(backupmgr.ResticPurgeOption{
+		SnapshotID: snapshotID,
+	})
+	return nil
+}
+
 func (cluster *Cluster) ResticPurgeRepo() error {
 	if cluster.Conf.BackupRestic {
 		err := cluster.Conf.CheckKeepWithin() // Check if backup-keep-within is valid
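Reviewer note: CheckResticErrors drains per-task errors from the manager in a single shot, so each warning fires once per monitor loop. A minimal sketch of the fetch-and-clear store the manager presumably keeps behind FetchAndClearErrors (locking and task names are assumptions):

package main

import (
	"errors"
	"fmt"
	"sync"
)

type Task string

// errorStore keeps at most one pending error per task type and hands them
// to the monitor loop atomically, clearing them in the same critical section.
type errorStore struct {
	mu   sync.Mutex
	errs map[Task]error
}

func (s *errorStore) Set(t Task, err error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.errs == nil {
		s.errs = make(map[Task]error)
	}
	s.errs[t] = err
}

// FetchAndClearErrors returns all pending errors and resets the store.
func (s *errorStore) FetchAndClearErrors() map[Task]error {
	s.mu.Lock()
	defer s.mu.Unlock()
	out := s.errs
	s.errs = nil
	return out
}

func main() {
	var s errorStore
	s.Set("fetch", errors.New("repository locked"))
	for task, err := range s.FetchAndClearErrors() {
		fmt.Printf("WARN task=%s err=%v\n", task, err)
	}
	fmt.Println("pending after drain:", len(s.FetchAndClearErrors()))
}
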
@@ -90,9 +148,11 @@ func (cluster *Cluster) ResticPurgeRepo() error {
 			return err
 		}
 
-		cluster.ResticRepo.SetEnv(cluster.ResticGetEnv())
+		if cluster.ResticManager == nil {
+			cluster.StartResticManager()
+		}
 
-		opt := archiver.ResticPurgeOption{
+		cluster.ResticManager.AddPurgeTask(backupmgr.ResticPurgeOption{
 			KeepLast:   cluster.Conf.BackupKeepLast,
 			KeepHourly: cluster.Conf.BackupKeepHourly,
 			KeepDaily:  cluster.Conf.BackupKeepDaily,
@@ -105,53 +165,26 @@
 			KeepWithinWeekly:  cluster.Conf.BackupKeepWithinWeekly,
 			KeepWithinMonthly: cluster.Conf.BackupKeepWithinMonthly,
 			KeepWithinYearly:  cluster.Conf.BackupKeepWithinYearly,
-		}
+		})
 
-		_, err = cluster.ResticRepo.AddPurgeTask(opt, true)
-		if err != nil {
-			cluster.SetState("WARN0094", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0094"], err), ErrFrom: "BACKUP"})
-			return err
-		}
 	}
 	return nil
 }
 
-func (cluster *Cluster) ResticFetchRepo() error {
+func (cluster *Cluster) ResticFetchRepo() {
 	// No need to add wait since it will be checked each monitor loop
 	if !cluster.Conf.BackupRestic {
-		return nil
+		return
 	}
 
-	if cluster.ResticRepo == nil {
-		err := fmt.Errorf("restic repo is nil")
-		cluster.SetState("WARN0095", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0095"], err), ErrFrom: "BACKUP"})
-		return err
+	if cluster.ResticManager == nil {
+		cluster.StartResticManager()
 	}
 
 	// Check if no other fetch task queued
-	if cluster.ResticRepo.HasFetchQueue() {
-		return nil
-	}
-
-	cluster.ResticRepo.SetEnv(cluster.ResticGetEnv())
-	_, err := cluster.ResticRepo.AddFetchTask(true)
-	if err != nil {
-		if !cluster.ResticRepo.CanInitRepo {
-			cluster.SetState("WARN0095", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0095"], err), ErrFrom: "BACKUP"})
-		} else if cluster.ResticRepo.CanFetch && cluster.ResticRepo.HasLocks {
-			cluster.SetState("WARN0134", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0134"], cluster.ResticRepo.GetRepoPath()), ErrFrom: "BACKUP"})
-		} else {
-			cluster.SetState("WARN0093", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0093"], err), ErrFrom: "BACKUP"})
-		}
-	} else {
-		if _, err2 := os.Stat(filepath.Join(cluster.Conf.WorkingDir, cluster.Name, "restic.config.bak")); os.IsNotExist(err2) {
-			if err2 := cluster.BackupResticConfig(); err2 != nil {
-				cluster.SetState("WARN0145", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0145"], err2), ErrFrom: "BACKUP"})
-			}
-		}
+	if !cluster.ResticManager.HasFetchQueue() {
+		cluster.ResticManager.AddFetchTask()
 	}
-
-	return err
 }
 
 func (cluster *Cluster) BackupResticConfig() error {
@@ -159,7 +192,12 @@ func (cluster *Cluster) BackupResticConfig() error {
 		return nil
 	}
 
-	repopath := cluster.ResticRepo.GetRepoPath()
+	if _, err := os.Stat(filepath.Join(cluster.Conf.WorkingDir, cluster.Name, "restic.config.bak")); err == nil {
+		// Backup already exists
+		return nil
+	}
+
+	repopath := cluster.ResticManager.GetRepoPath()
 	if repopath == "" {
 		return fmt.Errorf("restic repo path is empty")
 	}
@@ -181,7 +219,7 @@ func (cluster *Cluster) RestoreResticConfig(force bool) error {
 		return nil
 	}
 
-	repopath := cluster.ResticRepo.GetRepoPath()
+	repopath := cluster.ResticManager.GetRepoPath()
 	if repopath == "" {
 		return fmt.Errorf("restic repo path is empty")
 	}
@@ -203,62 +241,109 @@ func (cluster *Cluster) RestoreResticConfig(force bool) error {
 	return nil
 }
 
-func (cluster *Cluster) ResticUnlockRepo() error {
+func (cluster *Cluster) ResticUnlockRepo() {
 	// No need to add wait since it will be checked each monitor loop
 	if !cluster.Conf.BackupRestic {
-		return nil
+		return
 	}
 
-	if cluster.ResticRepo == nil {
-		err := fmt.Errorf("restic repo is nil")
-		cluster.SetState("WARN0095", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0095"], err), ErrFrom: "BACKUP"})
-		return err
+	if cluster.ResticManager == nil {
+		cluster.StartResticManager()
 	}
 
-	cluster.ResticRepo.SetEnv(cluster.ResticGetEnv())
-	_, err := cluster.ResticRepo.AddUnlockTask(true)
-	if err != nil {
-		cluster.SetState("WARN0093", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0093"], err), ErrFrom: "BACKUP"})
-	}
+	cluster.ResticManager.AddUnlockTask()
 
-	return err
 }
 
-func (cluster *Cluster) ResticGetQueue() ([]*archiver.ResticTask, error) {
+func (cluster *Cluster) ResticGetQueue() ([]*backupmgr.ResticTask, error) {
 	// No need to add wait since it will be checked each monitor loop
 	if !cluster.Conf.BackupRestic {
 		return nil, nil
 	}
 
-	if cluster.ResticRepo == nil {
-		err := fmt.Errorf("restic repo is nil")
-		cluster.SetState("WARN0095", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0095"], err), ErrFrom: "BACKUP"})
-		return nil, err
+	if cluster.ResticManager == nil {
+		cluster.StartResticManager()
+	}
+
+	return cluster.ResticManager.TaskQueue, nil
+}
+
+func (cluster *Cluster) ResticModifyQueue(moveType string, taskID, cmpID int) error {
+	if !cluster.Conf.BackupRestic {
+		return nil
+	}
+
+	if cluster.ResticManager == nil {
+		cluster.StartResticManager()
 	}
 
-	return cluster.ResticRepo.TaskQueue, nil
+	return cluster.ResticManager.MoveTask(moveType, taskID, cmpID)
 }
 
-func (cluster *Cluster) ResticResetQueue() error {
+func (cluster *Cluster) ResticCancelTask(taskId int) error {
 	// No need to add wait since it will be checked each monitor loop
 	if !cluster.Conf.BackupRestic {
 		return nil
 	}
 
-	if cluster.ResticRepo == nil {
-		err := fmt.Errorf("restic repo is nil")
-		cluster.SetState("WARN0095", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0095"], err), ErrFrom: "BACKUP"})
-		return err
+	if cluster.ResticManager == nil {
+		cluster.StartResticManager()
+	}
+
+	cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModGeneral, config.LvlInfo, "Cancelling restic task ID %d", taskId)
+
+	cluster.ResticManager.CancelTask(taskId)
+
+	return nil
+}
+
+func (cluster *Cluster) ResticClearQueue() error {
+	// No need to add wait since it will be checked each monitor loop
+	if !cluster.Conf.BackupRestic {
+		return nil
+	}
+
+	if cluster.ResticManager == nil {
+		cluster.StartResticManager()
 	}
 
-	cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModGeneral, config.LvlInfo, "Resetting restic queue. This will not affect the current running task.")
+	cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModGeneral, config.LvlInfo, "Clearing pending restic tasks from queue. Total tasks: %d", len(cluster.ResticManager.TaskQueue))
 
-	cluster.ResticRepo.SetEnv(cluster.ResticGetEnv())
-	cluster.ResticRepo.EmptyQueue()
+	cluster.ResticManager.ClearQueue()
 
 	return nil
 }
 
+// ResticRunQueue starts processing the restic task queue
+func (cluster *Cluster) ResticRunQueue() {
+
+	if !cluster.Conf.BackupRestic {
+		return
+	}
+
+	if cluster.ResticManager == nil {
+		cluster.StartResticManager()
+	}
+
+	cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModGeneral, config.LvlInfo, "Starting restic task queue processing. Total tasks: %d", len(cluster.ResticManager.TaskQueue))
+	cluster.ResticManager.ResumeWorker()
+}
+
+// ResticPauseQueue pauses processing of the restic task queue
+func (cluster *Cluster) ResticPauseQueue() {
+	// No need to add wait since it will be checked each monitor loop
+	if !cluster.Conf.BackupRestic {
+		return
+	}
+
+	if cluster.ResticManager == nil {
+		cluster.StartResticManager()
+	}
+
+	cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModGeneral, config.LvlInfo, "Pausing restic task queue processing")
+	cluster.ResticManager.PauseWorker()
+}
+
 func (cluster *Cluster) CheckBackupFreeSpace(backtype string, backup bool) error {
 	var isWarning bool
 	bcksrv := cluster.GetBackupServer()
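Reviewer note: PauseWorker/ResumeWorker imply a queue worker that can be gated between tasks. One way to build that gate, as a minimal sketch under stated assumptions (all names are illustrative, not the backupmgr API):

package main

import (
	"fmt"
	"sync"
	"time"
)

type worker struct {
	mu     sync.Mutex
	paused bool
	cond   *sync.Cond
	tasks  chan string
}

func newWorker() *worker {
	w := &worker{tasks: make(chan string, 16)}
	w.cond = sync.NewCond(&w.mu)
	return w
}

func (w *worker) PauseWorker()  { w.mu.Lock(); w.paused = true; w.mu.Unlock() }
func (w *worker) ResumeWorker() { w.mu.Lock(); w.paused = false; w.mu.Unlock(); w.cond.Broadcast() }

func (w *worker) run() {
	for t := range w.tasks {
		w.mu.Lock()
		for w.paused { // block between tasks while paused; a running task finishes first
			w.cond.Wait()
		}
		w.mu.Unlock()
		fmt.Println("running", t)
	}
}

func main() {
	w := newWorker()
	go w.run()
	w.tasks <- "fetch"
	w.PauseWorker()
	w.tasks <- "purge" // stays queued until resumed
	time.Sleep(50 * time.Millisecond)
	w.ResumeWorker()
	time.Sleep(50 * time.Millisecond)
	close(w.tasks)
}
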
@@ -394,13 +479,13 @@ func (cluster *Cluster) ChangeResticRepoPassword(newpass string) error {
 		return fmt.Errorf("New password is the same as the current one")
 	}
 
-	cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModArchive, config.LvlInfo, "Changing restic password for cluster %s", cluster.Name)
+	cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModRestic, config.LvlInfo, "Changing restic password for cluster %s", cluster.Name)
 
-	cluster.ResticRepo.SetEnv(cluster.ResticGetEnv())
+	cluster.ResticManager.SetEnv(cluster.ResticGetEnv())
 
-	keylist, err := cluster.ResticRepo.ResticKeyList()
+	keylist, err := cluster.ResticManager.GetRepoKeyList()
 	if err != nil {
-		cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModArchive, config.LvlErr, "Failed to list restic keys: %s", err)
+		cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModRestic, config.LvlErr, "Failed to list restic keys: %s", err)
 		return err
 	}
 
@@ -417,60 +502,60 @@
 		}
 	}
 
-	if _, err := os.Stat(cluster.ResticRepo.GetCacheDirPath()); os.IsNotExist(err) {
-		err := os.MkdirAll(cluster.ResticRepo.GetCacheDirPath(), os.ModePerm)
+	if _, err := os.Stat(cluster.ResticManager.GetCacheDirPath()); os.IsNotExist(err) {
+		err := os.MkdirAll(cluster.ResticManager.GetCacheDirPath(), os.ModePerm)
 		if err != nil {
 			return fmt.Errorf("Error creating restic cache directory: %s", err)
 		}
-		cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModArchive, config.LvlInfo, "Restic cache directory created: %s", cluster.ResticRepo.GetCacheDirPath())
+		cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModRestic, config.LvlInfo, "Restic cache directory created: %s", cluster.ResticManager.GetCacheDirPath())
 	}
 
-	cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModArchive, config.LvlInfo, "Adding new key to restic repository")
+	cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModRestic, config.LvlInfo, "Adding new key to restic repository")
 
-	newpassfile := filepath.Join(cluster.ResticRepo.GetCacheDirPath(), "newpass.txt")
+	newpassfile := filepath.Join(cluster.ResticManager.GetCacheDirPath(), "newpass.txt")
 	err = os.WriteFile(newpassfile, []byte(newpass), 0600)
 	if err != nil {
-		cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModArchive, config.LvlErr, "Failed to write new password file: %s", err)
+		cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModRestic, config.LvlErr, "Failed to write new password file: %s", err)
 		return fmt.Errorf("failed to write new password file: %w", err)
 	}
 
-	cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModArchive, config.LvlInfo, "Temporary password file created: %s", newpassfile)
+	cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModRestic, config.LvlInfo, "Temporary password file created: %s", newpassfile)
 
 	defer func() {
 		if _, err := os.Stat(newpassfile); err == nil {
-			cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModArchive, config.LvlInfo, "Removing temporary password file")
+			cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModRestic, config.LvlInfo, "Removing temporary password file")
 			err := os.Remove(newpassfile)
 			if err != nil {
-				cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModArchive, config.LvlErr, "Failed to remove temporary password file: %s", err)
+				cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModRestic, config.LvlErr, "Failed to remove temporary password file: %s", err)
 			}
 		}
 	}()
 
-	err = cluster.ResticRepo.ResticAddPassword(newpassfile)
+	err = cluster.ResticManager.AddRepoKey(newpassfile)
 	if err != nil {
-		cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModArchive, config.LvlErr, "Failed to add new key to restic repository: %s", err)
+		cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModRestic, config.LvlErr, "Failed to add new key to restic repository: %s", err)
 		return err
 	}
 
-	cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModArchive, config.LvlInfo, "New key added to restic repository successfully. Saving new password.")
+	cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModRestic, config.LvlInfo, "New key added to restic repository successfully. Saving new password.")
 
 	// Save new password in configuration
 	cluster.SetResticPassword(newpass)
 
-	cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModArchive, config.LvlInfo, "New restic password saved in configuration successfully. Removing old key from repository using new password.")
+	cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModRestic, config.LvlInfo, "New restic password saved in configuration successfully. Removing old key from repository using new password.")
 
 	// Reload env with new password
-	cluster.ResticRepo.SetEnv(cluster.ResticGetEnv())
+	cluster.ResticManager.SetEnv(cluster.ResticGetEnv())
 
 	// Remove old key using new password
-	err = cluster.ResticRepo.ResticRemoveKey(oldkeyid)
+	err = cluster.ResticManager.RemoveRepoKey(oldkeyid)
 	if err != nil {
-		cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModArchive, config.LvlErr, "Failed to remove old key from restic repository: %s", err)
+		cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModRestic, config.LvlErr, "Failed to remove old key from restic repository: %s", err)
 		return nil
 	}
 
-	cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModArchive, config.LvlInfo, "Restic password changed successfully. New key added and old key removed.")
+	cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModRestic, config.LvlInfo, "Restic password changed successfully. New key added and old key removed.")
 
 	return nil
 }
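Reviewer note: the rotation order above matters: add the new key from a 0600 temp password file, persist the new password, reload the environment, and only then remove the old key. A compressed sketch of that sequence (helper names are illustrative):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// rotateRepoKey illustrates the add-then-remove rotation used above:
// the old key is never removed until the new one is known to work.
func rotateRepoKey(cacheDir, newpass, oldKeyID string,
	addKey func(passfile string) error,
	removeKey func(keyID string) error) error {

	passfile := filepath.Join(cacheDir, "newpass.txt")
	// 0600 because the temp file holds a secret
	if err := os.WriteFile(passfile, []byte(newpass), 0600); err != nil {
		return fmt.Errorf("write temp password file: %w", err)
	}
	defer os.Remove(passfile) // always clean up the secret

	if err := addKey(passfile); err != nil {
		return fmt.Errorf("add new key: %w", err)
	}
	// Only after the new key is in place is the old one removed.
	return removeKey(oldKeyID)
}

func main() {
	err := rotateRepoKey(os.TempDir(), "s3cret", "oldkey",
		func(string) error { return nil },
		func(string) error { return nil })
	fmt.Println("rotation error:", err)
}
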
diff --git a/cluster/cluster_get.go b/cluster/cluster_get.go
index 70df7f85d..bc1601ead 100644
--- a/cluster/cluster_get.go
+++ b/cluster/cluster_get.go
@@ -27,7 +27,7 @@ import (
 	"github.com/signal18/replication-manager/config"
 	"github.com/signal18/replication-manager/opensvc"
 	"github.com/signal18/replication-manager/peer"
-	"github.com/signal18/replication-manager/utils/archiver"
+	"github.com/signal18/replication-manager/utils/backupmgr"
 	"github.com/signal18/replication-manager/utils/cron"
 	"github.com/signal18/replication-manager/utils/dbhelper"
 	"github.com/signal18/replication-manager/utils/misc"
@@ -1135,20 +1135,36 @@ func (cluster *Cluster) GetTableDLLNoFK(schema string, table string, srv *Server
 	return ddl, err
 }
 
-func (cluster *Cluster) GetBackups() []archiver.Backup {
-	if cluster.ResticRepo == nil {
-		return make([]archiver.Backup, 0)
+func (cluster *Cluster) GetBackups() map[int64]*backupmgr.BackupMetadata {
+	return cluster.BackupMetaMap.ToNewMap()
+}
+
+func (cluster *Cluster) GetBackupStat() *backupmgr.BackupStat {
+	backupStat := &backupmgr.BackupStat{}
+
+	backups := cluster.GetBackups()
+	for _, backup := range backups {
+		backupStat.TotalFileCount += backup.FileCount
+		backupStat.TotalSize += backup.Size
+	}
+
+	return backupStat
+}
+
+func (cluster *Cluster) GetSnapshots() []backupmgr.BackupSnapshot {
+	if cluster.ResticManager == nil {
+		return make([]backupmgr.BackupSnapshot, 0)
 	}
 
-	return cluster.ResticRepo.Backups
+	return cluster.ResticManager.Backups
 }
 
-func (cluster *Cluster) GetBackupStat() archiver.BackupStat {
-	if cluster.ResticRepo == nil {
-		return archiver.BackupStat{}
+func (cluster *Cluster) GetSnapshotStats() backupmgr.BackupStat {
+	if cluster.ResticManager == nil {
+		return backupmgr.BackupStat{}
 	}
 
-	return cluster.ResticRepo.BackupStat
+	return cluster.ResticManager.BackupStat
 }
 
 func (cluster *Cluster) GetQueryRules() []config.QueryRule {
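Reviewer note: GetBackupStat now derives totals from the local metadata map instead of asking restic, while snapshot listings moved to GetSnapshots/GetSnapshotStats. A minimal sketch of the aggregation (struct fields assumed from this diff):

package main

import "fmt"

// BackupMetadata and BackupStat fields are assumptions based on this diff.
type BackupMetadata struct {
	FileCount int64
	Size      int64
}

type BackupStat struct {
	TotalFileCount int64
	TotalSize      int64
}

func main() {
	backups := map[int64]*BackupMetadata{
		1700000000: {FileCount: 12, Size: 4 << 20},
		1700003600: {FileCount: 9, Size: 3 << 20},
	}

	stat := &BackupStat{}
	for _, b := range backups { // sum every known backup, regardless of method
		stat.TotalFileCount += b.FileCount
		stat.TotalSize += b.Size
	}
	fmt.Printf("%d files, %d bytes\n", stat.TotalFileCount, stat.TotalSize)
}
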
"github.com/jmoiron/sqlx" "github.com/signal18/replication-manager/config" - v3 "github.com/signal18/replication-manager/repmanv3" + "github.com/signal18/replication-manager/utils/backupmgr" "github.com/signal18/replication-manager/utils/dbhelper" "github.com/signal18/replication-manager/utils/gtid" "github.com/signal18/replication-manager/utils/misc" @@ -168,14 +168,14 @@ type ServerMonitor struct { SlowLog s18log.SlowLog `json:"-"` Status *config.StringsMap `json:"-"` PrevStatus *config.StringsMap `json:"-"` - PFSQueries *config.PFSQueriesMap `json:"-"` //PFS queries + PFSQueries *dbhelper.PFSQueriesMap `json:"-"` //PFS queries PFSInstruments *config.StringsMap `json:"pfsInstruments"` - SlowPFSQueries *config.PFSQueriesMap `json:"-"` //PFS queries from slow - DictTables *config.TablesMap `json:"-"` - Tables []v3.Table `json:"-"` + SlowPFSQueries *dbhelper.PFSQueriesMap `json:"-"` //PFS queries from slow + DictTables *dbhelper.TablesMap `json:"-"` + Tables []dbhelper.Table `json:"-"` Disks []dbhelper.Disk `json:"-"` - Plugins *config.PluginsMap `json:"-"` - Users *config.GrantsMap `json:"-"` + Plugins *dbhelper.PluginsMap `json:"-"` + Users *dbhelper.GrantsMap `json:"-"` MetaDataLocks []dbhelper.MetaDataLock `json:"-"` ErrorLogTailer *tail.Tail `json:"-"` SlowLogTailer *tail.Tail `json:"-"` @@ -213,7 +213,7 @@ type ServerMonitor struct { IsRefreshingBinlogMeta bool IsLoadingJobList bool NeedRefreshJobs bool - PointInTimeMeta config.PointInTimeMeta + PointInTimeMeta backupmgr.PointInTimeMeta BinaryLogDir string BinaryLogName string DBDataDir string @@ -224,8 +224,8 @@ type ServerMonitor struct { } type ServerBackupMeta struct { - Logical *config.BackupMetadata `json:"logical"` - Physical *config.BackupMetadata `json:"physical"` + Logical *backupmgr.BackupMetadata `json:"logical"` + Physical *backupmgr.BackupMetadata `json:"physical"` } type SlaveVariables struct { @@ -273,7 +273,7 @@ const ( func (cluster *Cluster) newServerMonitor(url string, user string, pass string, compute bool, domain string, source string) (*ServerMonitor, error) { var err error server := new(ServerMonitor) - server.Tables = make([]v3.Table, 0) + server.Tables = make([]dbhelper.Table, 0) server.HostCnf = url // store host from config file server.QPS = 0 server.IsCompute = compute @@ -286,8 +286,8 @@ func (cluster *Cluster) newServerMonitor(url string, user string, pass string, c server.IsGroupReplicationSlave = false server.IsGroupReplicationMaster = false server.JobResults = config.NewTasksMap() - server.LastBackupMeta.Physical = new(config.BackupMetadata) - server.LastBackupMeta.Logical = new(config.BackupMetadata) + server.LastBackupMeta.Physical = new(backupmgr.BackupMetadata) + server.LastBackupMeta.Logical = new(backupmgr.BackupMetadata) server.BinaryLogMetaToWrite = make([]string, 0) server.BinaryLogMetaToRemove = make([]string, 0) server.NeedRefreshJobs = true @@ -331,12 +331,12 @@ func (cluster *Cluster) newServerMonitor(url string, user string, pass string, c server.EngineInnoDB = config.NewStringsMap() server.Status = config.NewStringsMap() server.PrevStatus = config.NewStringsMap() - server.PFSQueries = config.NewPFSQueriesMap() server.PFSInstruments = config.NewStringsMap() - server.SlowPFSQueries = config.NewPFSQueriesMap() - server.DictTables = config.NewTablesMap() - server.Plugins = config.NewPluginsMap() - server.Users = config.NewGrantsMap() + server.PFSQueries = dbhelper.NewPFSQueriesMap() + server.SlowPFSQueries = dbhelper.NewPFSQueriesMap() + server.DictTables = dbhelper.NewTablesMap() + 
@@ -331,12 +331,12 @@ func (cluster *Cluster) newServerMonitor(url string, user string, pass string, c
 	server.EngineInnoDB = config.NewStringsMap()
 	server.Status = config.NewStringsMap()
 	server.PrevStatus = config.NewStringsMap()
-	server.PFSQueries = config.NewPFSQueriesMap()
 	server.PFSInstruments = config.NewStringsMap()
-	server.SlowPFSQueries = config.NewPFSQueriesMap()
-	server.DictTables = config.NewTablesMap()
-	server.Plugins = config.NewPluginsMap()
-	server.Users = config.NewGrantsMap()
+	server.PFSQueries = dbhelper.NewPFSQueriesMap()
+	server.SlowPFSQueries = dbhelper.NewPFSQueriesMap()
+	server.DictTables = dbhelper.NewTablesMap()
+	server.Plugins = dbhelper.NewPluginsMap()
+	server.Users = dbhelper.NewGrantsMap()
 	server.BinaryLogFiles = dbhelper.NewBinaryLogMetaMap()
 	server.WorkLoad = config.NewWorkLoadsMap()
 
@@ -942,7 +942,7 @@ func (server *ServerMonitor) Refresh() error {
 
 	// get Users
 	users, logs, err := dbhelper.GetUsers(server.Conn, server.DBVersion)
-	server.Users = config.FromNormalGrantsMap(server.Users, users)
+	server.Users = dbhelper.FromNormalGrantsMap(server.Users, users)
 	cluster.LogSQL(logs, err, server.URL, "Monitor", config.LvlDbg, "Could not get database users %s %s", server.URL, err)
 
 	if cluster.Conf.MonitorScheduler {
@@ -1123,7 +1123,7 @@ func (server *ServerMonitor) Refresh() error {
 	if cluster.StateMachine.GetHeartbeats()%60 == 0 {
 		if cluster.Conf.MonitorPlugins {
 			plugins, _, _ := dbhelper.GetPlugins(server.Conn, server.DBVersion)
-			server.Plugins = config.FromNormalPluginsMap(server.Plugins, plugins)
+			server.Plugins = dbhelper.FromNormalPluginsMap(server.Plugins, plugins)
 			server.HaveMetaDataLocksLog = server.HasInstallPlugin("METADATA_LOCK_INFO")
 			server.HaveQueryResponseTimeLog = server.HasInstallPlugin("QUERY_RESPONSE_TIME")
 			server.HaveDiskMonitor = server.HasInstallPlugin("DISK")
diff --git a/cluster/srv_bck.go b/cluster/srv_bck.go
index f6ef65bcc..0e6e91f5b 100644
--- a/cluster/srv_bck.go
+++ b/cluster/srv_bck.go
@@ -17,6 +17,7 @@ import (
 	"time"
 
 	"github.com/signal18/replication-manager/config"
+	"github.com/signal18/replication-manager/utils/backupmgr"
 )
 
 func (server *ServerMonitor) FetchLastBackupMetadata() {
@@ -73,7 +74,7 @@ func (server *ServerMonitor) AppendLastMetadata(method string, latest *int64) {
 	}
 }
 
-func (server *ServerMonitor) ReadLastMetadata(method string) (*config.BackupMetadata, error) {
+func (server *ServerMonitor) ReadLastMetadata(method string) (*backupmgr.BackupMetadata, error) {
 	var filename string = method
 	var ext string = ".meta.json"
 
@@ -89,7 +90,7 @@ func (server *ServerMonitor) ReadLastMetadata(method string) (*config.BackupMeta
 	}
 	defer file.Close()
 
-	meta := new(config.BackupMetadata)
+	meta := new(backupmgr.BackupMetadata)
 	err = json.NewDecoder(file).Decode(meta)
 	if err != nil {
 		return nil, err
@@ -98,20 +99,20 @@ func (server *ServerMonitor) ReadLastMetadata(method string) (*config.BackupMeta
 	return meta, nil
 }
 
-func (server *ServerMonitor) GetLatestMeta(method string) (int64, *config.BackupMetadata) {
+func (server *ServerMonitor) GetLatestMeta(method string) (int64, *backupmgr.BackupMetadata) {
 	cluster := server.ClusterGroup
 	var latest int64 = 0
-	var meta *config.BackupMetadata
+	var meta *backupmgr.BackupMetadata
 	cluster.BackupMetaMap.Range(func(k, v any) bool {
-		m := v.(*config.BackupMetadata)
+		m := v.(*backupmgr.BackupMetadata)
 		valid := false
 		switch method {
 		case "logical":
-			if m.BackupMethod == config.BackupMethodLogical {
+			if m.BackupMethod == backupmgr.BackupMethodLogical {
 				valid = true
 			}
 		case "physical":
-			if m.BackupMethod == config.BackupMethodPhysical {
+			if m.BackupMethod == backupmgr.BackupMethodPhysical {
 				valid = true
 			}
 		default:
@@ -135,12 +136,12 @@ func (server *ServerMonitor) GetLatestMeta(method string) (int64, *config.Backup
 	return latest, meta
 }
 
-func (server *ServerMonitor) ReseedPointInTime(meta config.PointInTimeMeta) error {
+func (server *ServerMonitor) ReseedPointInTime(meta backupmgr.PointInTimeMeta) error {
 	var err error
 	cluster := server.ClusterGroup
 
-	server.SetPointInTimeMeta(meta)                           //Set for PITR
-	defer server.SetPointInTimeMeta(config.PointInTimeMeta{}) //Reset after done
+	server.SetPointInTimeMeta(meta)                              //Set for PITR
+	defer server.SetPointInTimeMeta(backupmgr.PointInTimeMeta{}) //Reset after done
 
 	backup := cluster.BackupMetaMap.Get(meta.Backup)
 	if backup == nil {
@@ -205,8 +206,8 @@ func (server *ServerMonitor) ReseedPointInTime(meta config.PointInTimeMeta) erro
 		return fmt.Errorf("Source not found")
 	}
 
-	start := config.ReadBinaryLogsBoundary{Filename: backup.BinLogFileName, Position: int64(backup.BinLogFilePos)}
-	end := config.ReadBinaryLogsBoundary{UseTimestamp: true, Timestamp: time.Unix(meta.RestoreTime, 0)}
+	start := backupmgr.ReadBinaryLogsBoundary{Filename: backup.BinLogFileName, Position: int64(backup.BinLogFilePos)}
+	end := backupmgr.ReadBinaryLogsBoundary{UseTimestamp: true, Timestamp: time.Unix(meta.RestoreTime, 0)}
 	err = source.ReadAndExecBinaryLogsWithinRange(start, end, server)
 	if err != nil {
 		cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModTask, config.LvlErr, "Error while applying binlogs on %s. err: %s", server.URL, err.Error())
@@ -218,10 +219,10 @@ func (server *ServerMonitor) ReseedPointInTime(meta config.PointInTimeMeta) erro
 	return nil
 }
 
-func (server *ServerMonitor) InjectViaBinlogs(meta config.PointInTimeMeta) error {
+func (server *ServerMonitor) InjectViaBinlogs(meta backupmgr.PointInTimeMeta) error {
 	return nil
 }
 
-func (server *ServerMonitor) InjectViaReplication(meta config.PointInTimeMeta) error {
+func (server *ServerMonitor) InjectViaReplication(meta backupmgr.PointInTimeMeta) error {
 	return nil
 }
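Reviewer note: GetLatestMeta scans the whole metadata map and keeps the newest entry for one backup method. A minimal sketch of that selection loop over a plain map (the sync.Map indirection is elided for brevity):

package main

import "fmt"

type BackupMethod int

const (
	BackupMethodLogical  BackupMethod = 1
	BackupMethodPhysical BackupMethod = 2
)

type BackupMetadata struct {
	Id           int64 // Unix timestamp, doubles as the recency key
	BackupMethod BackupMethod
}

// latestMeta returns the newest metadata for the requested method.
func latestMeta(all map[int64]*BackupMetadata, method BackupMethod) (int64, *BackupMetadata) {
	var latest int64
	var meta *BackupMetadata
	for _, m := range all {
		if m.BackupMethod != method {
			continue
		}
		if m.Id > latest { // newer than the best seen so far
			latest, meta = m.Id, m
		}
	}
	return latest, meta
}

func main() {
	all := map[int64]*BackupMetadata{
		1700000000: {Id: 1700000000, BackupMethod: BackupMethodLogical},
		1700003600: {Id: 1700003600, BackupMethod: BackupMethodLogical},
		1700001800: {Id: 1700001800, BackupMethod: BackupMethodPhysical},
	}
	id, _ := latestMeta(all, BackupMethodLogical)
	fmt.Println("latest logical backup:", id)
}
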
diff --git a/cluster/srv_binlog.go b/cluster/srv_binlog.go
index 33187426a..d7c31d875 100644
--- a/cluster/srv_binlog.go
+++ b/cluster/srv_binlog.go
@@ -28,6 +28,7 @@ import (
 	"github.com/go-mysql-org/go-mysql/mysql"
 	"github.com/go-mysql-org/go-mysql/replication"
 	"github.com/signal18/replication-manager/config"
+	"github.com/signal18/replication-manager/utils/backupmgr"
 	"github.com/signal18/replication-manager/utils/dbhelper"
 	"github.com/signal18/replication-manager/utils/misc"
 	"github.com/signal18/replication-manager/utils/state"
@@ -816,10 +817,10 @@ type LogEvent struct {
 	Query string
 }
 
-func (server *ServerMonitor) ReadAndExecBinaryLogsWithinRange(start config.ReadBinaryLogsBoundary, end config.ReadBinaryLogsBoundary, dest *ServerMonitor) error {
+func (server *ServerMonitor) ReadAndExecBinaryLogsWithinRange(start backupmgr.ReadBinaryLogsBoundary, end backupmgr.ReadBinaryLogsBoundary, dest *ServerMonitor) error {
 	cluster := server.ClusterGroup
 	binlogs := server.BinaryLogFiles.GetKeys()
-	readStart := config.ReadBinaryLogsBoundary(start)
+	readStart := backupmgr.ReadBinaryLogsBoundary(start)
 	hasReadOnce := false
 
 	if end.Filename == "" {
@@ -873,7 +874,7 @@ func (server *ServerMonitor) ReadAndExecBinaryLogsWithinRange(start config.ReadB
 	return nil
 }
 
-func (server *ServerMonitor) GetBinlogPositionFromTimestamp(start uint32, end *config.ReadBinaryLogsBoundary) error {
+func (server *ServerMonitor) GetBinlogPositionFromTimestamp(start uint32, end *backupmgr.ReadBinaryLogsBoundary) error {
 	cluster := server.ClusterGroup
 	port, _ := strconv.Atoi(server.Port)
 
@@ -926,7 +927,7 @@ func (server *ServerMonitor) GetBinlogPositionFromTimestamp(start uint32, end *c
 	}
 }
 
-func (server *ServerMonitor) ReadAndApplyBinaryLogsWithinRange(start config.ReadBinaryLogsBoundary, end config.ReadBinaryLogsBoundary, dest *ServerMonitor) error {
+func (server *ServerMonitor) ReadAndApplyBinaryLogsWithinRange(start backupmgr.ReadBinaryLogsBoundary, end backupmgr.ReadBinaryLogsBoundary, dest *ServerMonitor) error {
 	cluster := server.ClusterGroup
 
 	if _, err := os.Stat(cluster.GetMysqlBinlogPath()); os.IsNotExist(err) {
diff --git a/cluster/srv_get.go b/cluster/srv_get.go
index 53fcff5de..377176d58 100644
--- a/cluster/srv_get.go
+++ b/cluster/srv_get.go
@@ -22,7 +22,6 @@ import (
 
 	"github.com/jmoiron/sqlx"
 	"github.com/signal18/replication-manager/config"
-	v3 "github.com/signal18/replication-manager/repmanv3"
 	"github.com/signal18/replication-manager/utils/dbhelper"
 	"github.com/signal18/replication-manager/utils/s18log"
 	"github.com/signal18/replication-manager/utils/state"
@@ -281,8 +280,8 @@ func (server *ServerMonitor) GetNumberOfEventsAfterPos(file string, pos string)
 	return dbhelper.GetNumberOfEventsAfterPos(server.Conn, file, pos)
 }
 
-func (server *ServerMonitor) GetTableFromDict(URI string) (*v3.Table, error) {
-	var emptyTable *v3.Table = new(v3.Table)
+func (server *ServerMonitor) GetTableFromDict(URI string) (*dbhelper.Table, error) {
+	var emptyTable *dbhelper.Table = new(dbhelper.Table)
 	val, ok := server.DictTables.CheckAndGet(URI)
 	if !ok {
 		if len(server.DictTables.ToNewMap()) == 0 {
@@ -461,7 +460,7 @@ func (server *ServerMonitor) GetPFSQueries() {
 	logs := ""
 	// GET PFS query digest
 	pfsq, logs, err := dbhelper.GetQueries(server.Conn)
-	server.PFSQueries = config.FromNormalPFSMap(server.PFSQueries, pfsq)
+	server.PFSQueries = dbhelper.FromNormalPFSMap(server.PFSQueries, pfsq)
 	server.ClusterGroup.LogSQL(logs, err, server.URL, "Monitor", config.LvlDbg, "Could not get queries %s %s", server.URL, err)
 }
 
@@ -699,19 +698,19 @@ func (server *ServerMonitor) GetSlowLogTable(wg *sync.WaitGroup) error {
 	return nil
 }
 
-func (server *ServerMonitor) GetTables() []v3.Table {
+func (server *ServerMonitor) GetTables() []dbhelper.Table {
 	if server.Tables == nil {
-		server.Tables = make([]v3.Table, 0)
+		server.Tables = make([]dbhelper.Table, 0)
 	}
 	return server.Tables
 }
 
-func (server *ServerMonitor) GetVTables() map[string]*v3.Table {
+func (server *ServerMonitor) GetVTables() map[string]*dbhelper.Table {
 	return server.DictTables.ToNewMap()
 }
 
-func (server *ServerMonitor) GetDictTables() []*v3.Table {
-	var tables []*v3.Table
+func (server *ServerMonitor) GetDictTables() []*dbhelper.Table {
+	var tables []*dbhelper.Table
 	if server.IsFailed() {
 		return tables
 	}
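Reviewer note: ReadAndExecBinaryLogsWithinRange takes a start fixed at the backup's binlog coordinates and an end that may be a timestamp rather than a file/position pair. A minimal sketch of the boundary type and how a PITR range is expressed (the type mirrors the definition removed from config/backup.go below):

package main

import (
	"fmt"
	"time"
)

// ReadBinaryLogsBoundary mirrors the type moved from config to backupmgr:
// either a concrete file/position, or a timestamp when UseTimestamp is set.
type ReadBinaryLogsBoundary struct {
	UseTimestamp bool
	Filename     string
	Position     int64
	Timestamp    time.Time
}

func describe(b ReadBinaryLogsBoundary) string {
	if b.UseTimestamp {
		return fmt.Sprintf("up to %s", b.Timestamp.Format(time.RFC3339))
	}
	return fmt.Sprintf("%s:%d", b.Filename, b.Position)
}

func main() {
	start := ReadBinaryLogsBoundary{Filename: "mysql-bin.000042", Position: 12345}
	end := ReadBinaryLogsBoundary{UseTimestamp: true, Timestamp: time.Unix(1700003600, 0)}
	fmt.Printf("replay from %s %s\n", describe(start), describe(end))
}
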
diff --git a/cluster/srv_job.go b/cluster/srv_job.go
index a350912a1..331a6cc13 100644
--- a/cluster/srv_job.go
+++ b/cluster/srv_job.go
@@ -34,6 +34,7 @@ import (
 	gzip "github.com/klauspost/pgzip"
 	dumplingext "github.com/pingcap/dumpling/v4/export"
 	"github.com/signal18/replication-manager/config"
+	"github.com/signal18/replication-manager/utils/backupmgr"
 	"github.com/signal18/replication-manager/utils/crypto"
 	"github.com/signal18/replication-manager/utils/dbhelper"
 	"github.com/signal18/replication-manager/utils/misc"
@@ -389,11 +390,11 @@ func (server *ServerMonitor) JobBackupPhysical() error {
 		cluster.BackupMetaMap.Delete(prevId)
 	}
 
-	server.LastBackupMeta.Physical = &config.BackupMetadata{
+	server.LastBackupMeta.Physical = &backupmgr.BackupMetadata{
 		Id:             now.Unix(),
 		StartTime:      now,
-		BackupMethod:   config.BackupMethodPhysical,
-		BackupStrategy: config.BackupStrategyFull,
+		BackupMethod:   backupmgr.BackupMethodPhysical,
+		BackupStrategy: backupmgr.BackupStrategyFull,
 		BackupTool:     cluster.Conf.BackupPhysicalType,
 		Source:         server.URL,
 		Dest:           dest,
@@ -2347,12 +2348,12 @@ func (server *ServerMonitor) JobBackupLogical() error {
 		cluster.BackupMetaMap.Delete(prevId)
 	}
 
-	server.LastBackupMeta.Logical = &config.BackupMetadata{
+	server.LastBackupMeta.Logical = &backupmgr.BackupMetadata{
 		Id:             start.Unix(),
 		StartTime:      start,
-		BackupMethod:   config.BackupMethodLogical,
+		BackupMethod:   backupmgr.BackupMethodLogical,
 		BackupTool:     cluster.Conf.BackupLogicalType,
-		BackupStrategy: config.BackupStrategyFull,
+		BackupStrategy: backupmgr.BackupStrategyFull,
 		Source:         server.URL,
 		Previous:       prevId,
 	}
@@ -2427,7 +2428,7 @@ func (server *ServerMonitor) JobBackupLogical() error {
 			_, e3 := os.Stat(filename)
 			if e3 == nil {
 				server.LastBackupMeta.Logical.EndTime = time.Now()
-				server.LastBackupMeta.Logical.GetSize()
+				server.LastBackupMeta.Logical.GetSizeAndFileCount()
 				server.LastBackupMeta.Logical.Completed = true
 				server.SetBackupLogicalCookie(config.ConstBackupLogicalTypeMysqldump)
 			}
@@ -2456,7 +2457,7 @@ func (server *ServerMonitor) JobBackupLogical() error {
 			_, e3 := os.Stat(outputdir)
 			if e3 == nil {
 				server.LastBackupMeta.Logical.EndTime = time.Now()
-				server.LastBackupMeta.Logical.GetSize()
+				server.LastBackupMeta.Logical.GetSizeAndFileCount()
 				server.LastBackupMeta.Logical.Completed = true
 				server.SetBackupLogicalCookie(config.ConstBackupLogicalTypeDumpling)
 			}
@@ -2486,7 +2487,7 @@ func (server *ServerMonitor) JobBackupLogical() error {
 			_, e3 := os.Stat(outputdir)
 			if e3 == nil {
 				server.LastBackupMeta.Logical.EndTime = time.Now()
-				server.LastBackupMeta.Logical.GetSize()
+				server.LastBackupMeta.Logical.GetSizeAndFileCount()
 				server.LastBackupMeta.Logical.Completed = true
 				server.SetBackupLogicalCookie(config.ConstBackupLogicalTypeDumpling)
 			}
@@ -2506,7 +2507,7 @@ func (server *ServerMonitor) JobBackupLogical() error {
 		}
 	}
 
-	server.WriteBackupMetadata(config.BackupMethodLogical)
+	server.WriteBackupMetadata(backupmgr.BackupMethodLogical)
 	if err == nil {
 		cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModGeneral, config.LvlInfo, "[SUCCESS] Finish logical backup %s for: %s", cluster.Conf.BackupLogicalType, server.URL)
 	} else {
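Reviewer note: GetSize was renamed GetSizeAndFileCount because it now also counts files, which GetBackupStat sums later. A minimal sketch of the directory walk, based on the GetSize shown in the deleted config/backup.go; the FileCount field is an assumption from this diff:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

type BackupMetadata struct {
	Dest      string
	Size      int64
	FileCount int64
}

// GetSizeAndFileCount walks Dest once, accumulating total bytes and file count.
func (bm *BackupMetadata) GetSizeAndFileCount() error {
	var size, count int64
	err := filepath.Walk(bm.Dest, func(_ string, info os.FileInfo, err error) error {
		if err == nil && !info.IsDir() {
			size += info.Size()
			count++
		}
		return err
	})
	bm.Size, bm.FileCount = size, count
	return err
}

func main() {
	bm := &BackupMetadata{Dest: os.TempDir()}
	if err := bm.GetSizeAndFileCount(); err != nil {
		fmt.Println("walk error:", err)
	}
	fmt.Printf("%d files, %d bytes\n", bm.FileCount, bm.Size)
}
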
@@ -2598,71 +2599,13 @@ func (server *ServerMonitor) myDumperCopyLogs(r io.Reader, module int, level str
 	return valid
 }
 
-func (server *ServerMonitor) BackupRestic(tags ...string) error {
+func (server *ServerMonitor) BackupRestic(tags ...string) {
 	cluster := server.ClusterGroup
-	var stdout, stderr []byte
-	var errStdout, errStderr error
-
-	if cluster.Conf.BackupRestic {
-		// Wait for fetch or purge, so it will not conflict
-		if !cluster.canResticFetchRepo {
-			time.Sleep(time.Second)
-			return server.BackupRestic(tags...)
-		}
-		cluster.SetInResticBackupState(true)
-		defer cluster.SetInResticBackupState(false)
-
-		args := make([]string, 0)
-
-		args = append(args, "backup")
-		for _, tag := range tags {
-			if tag != "" {
-				args = append(args, "--tag")
-				args = append(args, tag)
-			}
-		}
-		args = append(args, server.GetMyBackupDirectory())
-
-		resticcmd := exec.Command(cluster.Conf.BackupResticBinaryPath, args...)
-
-		stdoutIn, _ := resticcmd.StdoutPipe()
-		stderrIn, _ := resticcmd.StderrPipe()
-
-		//out, err := resticcmd.CombinedOutput()
-
-		resticcmd.Env = cluster.ResticGetEnv()
-
-		if err := resticcmd.Start(); err != nil {
-			cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModTask, config.LvlErr, "Failed restic command : %s %s", resticcmd.Path, err)
-			return err
-		}
-
-		// cmd.Wait() should be called only after we finish reading
-		// from stdoutIn and stderrIn.
-		// wg ensures that we finish
-		var wg sync.WaitGroup
-		wg.Add(1)
-		go func() {
-			stdout, errStdout = server.copyAndCapture(os.Stdout, stdoutIn)
-			wg.Done()
-		}()
-
-		stderr, errStderr = server.copyAndCapture(os.Stderr, stderrIn)
-
-		wg.Wait()
-
-		err := resticcmd.Wait()
-		if err != nil {
-			cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModTask, config.LvlErr, "%s\n", err)
-		}
-		if errStdout != nil || errStderr != nil {
-			cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModTask, config.LvlErr, "Failed to capture stdout or stderr\n")
-		}
-		outStr, errStr := string(stdout), string(stderr)
-		cluster.LogModulePrintf(cluster.Conf.Verbose, config.ConstLogModTask, config.LvlInfo, "result:%s\n%s\n%s", resticcmd.Path, outStr, errStr)
-
+	if !cluster.Conf.BackupRestic {
+		return
 	}
-	return nil
+
+	cluster.ResticManager.AddBackupTask(server.GetMyBackupDirectory(), tags)
 }
 
 func (server *ServerMonitor) copyAndCapture(w io.Writer, r io.Reader) ([]byte, error) {
@@ -3374,15 +3317,15 @@ func (server *ServerMonitor) ParseLogEntries(entry config.LogEntry, mod int, tas
 	return nil
 }
 
-func (server *ServerMonitor) WriteBackupMetadata(backtype config.BackupMethod) {
+func (server *ServerMonitor) WriteBackupMetadata(backtype backupmgr.BackupMethod) {
 	cluster := server.ClusterGroup
-	var lastmeta *config.BackupMetadata
+	var lastmeta *backupmgr.BackupMetadata
 
 	switch backtype {
-	case config.BackupMethodLogical:
+	case backupmgr.BackupMethodLogical:
 		lastmeta = server.LastBackupMeta.Logical
 		defer cluster.CheckLogicalBackupToolVersion(server) // Update backup tool version after backup
-	case config.BackupMethodPhysical:
+	case backupmgr.BackupMethodPhysical:
 		lastmeta = server.LastBackupMeta.Physical
 		defer cluster.CheckPhysicalBackupToolVersion(server) // Update backup tool version after backup
 	default:
@@ -3391,7 +3334,7 @@ func (server *ServerMonitor) WriteBackupMetadata(backtype config.BackupMethod) {
 	}
 
 	if _, err := os.Stat(lastmeta.Dest); err == nil {
-		lastmeta.GetSize()
+		lastmeta.GetSizeAndFileCount()
 		lastmeta.EndTime = time.Now()
 	}
 
@@ -3445,9 +3388,9 @@ func (server *ServerMonitor) WriteBackupMetadata(backtype config.BackupMethod) {
 		// Revert to previous meta with same type
 		cluster.BackupMetaMap.Delete(lastmeta.Id)
 		switch backtype {
-		case config.BackupMethodLogical:
+		case backupmgr.BackupMethodLogical:
 			_, server.LastBackupMeta.Logical = server.GetLatestMeta("logical")
-		case config.BackupMethodPhysical:
+		case backupmgr.BackupMethodPhysical:
 			_, server.LastBackupMeta.Physical = server.GetLatestMeta("physical")
 		}
 	}
@@ -3513,7 +3456,7 @@ func (server *ServerMonitor) JobFinishReceiveFile(task string) error {
 		server.DelWaitSqlErrorlogCookie()
 	case config.ConstBackupPhysicalTypeXtrabackup, config.ConstBackupPhysicalTypeMariaBackup:
 		backtype := "physical"
-		server.WriteBackupMetadata(config.BackupMethodPhysical)
+		server.WriteBackupMetadata(backupmgr.BackupMethodPhysical)
 		server.BackupRestic(cluster.Conf.Cloud18GitUser, cluster.Name, server.DBVersion.Flavor, server.DBVersion.ToString(), backtype, cluster.Conf.BackupPhysicalType)
 		cluster.SetInPhysicalBackupState(false)
 	case "printdefault-current":
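Reviewer note: BackupRestic no longer shells out to restic inline with pipe plumbing; it just enqueues a backup task on the manager and returns. A minimal sketch of that enqueue (ResticTask fields and the AddBackupTask signature are assumptions from this diff):

package main

import (
	"fmt"
	"sync"
)

type ResticTask struct {
	Action string
	Path   string
	Tags   []string
}

type ResticManager struct {
	mu        sync.Mutex
	TaskQueue []*ResticTask
}

// AddBackupTask appends a backup task; a worker goroutine drains the queue
// later, so callers like JobFinishReceiveFile never block on restic itself.
func (m *ResticManager) AddBackupTask(path string, tags []string) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.TaskQueue = append(m.TaskQueue, &ResticTask{Action: "backup", Path: path, Tags: tags})
}

func main() {
	m := &ResticManager{}
	m.AddBackupTask("/var/lib/replication-manager/backups/db1", []string{"cluster1", "physical"})
	fmt.Println("queued tasks:", len(m.TaskQueue))
}
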
"github.com/signal18/replication-manager/utils/misc" "github.com/signal18/replication-manager/utils/state" @@ -510,7 +511,7 @@ func (server *ServerMonitor) SetNeedRefreshJobs(value bool) { server.NeedRefreshJobs = value } -func (server *ServerMonitor) SetPointInTimeMeta(value config.PointInTimeMeta) { +func (server *ServerMonitor) SetPointInTimeMeta(value backupmgr.PointInTimeMeta) { server.PointInTimeMeta = value } @@ -658,7 +659,7 @@ func (server *ServerMonitor) SetDBUserCredentials(user, pass string, withGrantOp // refresh user list users, _, err := dbhelper.GetUsers(server.Conn, server.DBVersion) - server.Users = config.FromNormalGrantsMap(server.Users, users) + server.Users = dbhelper.FromNormalGrantsMap(server.Users, users) // set grants for all hosts of this user for _, u := range server.Users.ToNewMap() { diff --git a/config/backup.go b/config/backup.go deleted file mode 100644 index 696836d96..000000000 --- a/config/backup.go +++ /dev/null @@ -1,78 +0,0 @@ -package config - -import ( - "os" - "path/filepath" - "time" -) - -type BackupMethod int - -const ( - BackupMethodLogical = 1 - BackupMethodPhysical = 2 -) - -type BackupStrategy int - -const ( - BackupStrategyFull = 1 - BackupStrategyIncremental = 2 - BackupStrategyDifferential = 3 -) - -type BackupMetadata struct { - Id int64 `json:"id"` - StartTime time.Time `json:"startTime"` - EndTime time.Time `json:"endTime"` - BackupMethod BackupMethod `json:"backupMethod"` - BackupTool string `json:"backupTool"` - BackupToolVersion string `json:"backupToolVersion"` - BackupStrategy BackupStrategy `json:"backupStrategy"` - Source string `json:"source"` - Dest string `json:"dest"` - Size int64 `json:"size"` - Compressed bool `json:"compressed"` - Encrypted bool `json:"encrypted"` - EncryptionAlgo string `json:"encryptionAlgo"` - EncryptionKey string `json:"encryptionKey"` - Checksum string `json:"checksum"` - RetentionDays int `json:"retentionDays"` - BinLogFileName string `json:"binLogFileName"` - BinLogFilePos uint64 `json:"binLogFilePos"` - BinLogGtid string `json:"binLogUuid"` - Completed bool `json:"completed"` - SplitUser bool `json:"splitUser"` - Previous int64 `json:"previous"` -} - -type PointInTimeMeta struct { - IsInPITR bool - UseBinlog bool - Backup int64 - RestoreTime int64 -} - -func (bm *BackupMetadata) GetSize() error { - var size int64 = 0 - err := filepath.Walk(bm.Dest, func(_ string, info os.FileInfo, err error) error { - if err == nil && !info.IsDir() { - size += info.Size() - } - return err - }) - bm.Size = size - return err -} - -type ReadBinaryLogsBoundary struct { - UseTimestamp bool - Filename string - Position int64 - Timestamp time.Time -} - -type ReadBinaryLogsRange struct { - Start ReadBinaryLogsBoundary - End ReadBinaryLogsBoundary -} diff --git a/config/config.go b/config/config.go index 1dbc2c102..f97ff76d4 100644 --- a/config/config.go +++ b/config/config.go @@ -155,7 +155,7 @@ type Config struct { LogGraphiteLevel int `mapstructure:"log-level-graphite" toml:"log-level-graphite" json:"logGraphiteLevel"` LogBinlogPurge bool `mapstructure:"log-binlog-purge" toml:"log-binlog-purge" json:"logBinlogPurge"` LogBinlogPurgeLevel int `mapstructure:"log-level-binlog-purge" toml:"log-level-binlog-purge" json:"logBinlogPurgeLevel"` - LogArchiveLevel int `mapstructure:"log-level-archive" toml:"log-level-archive" json:"logArchiveLevel"` + LogResticLevel int `mapstructure:"log-level-restic" toml:"log-level-restic" json:"logResticLevel"` LogMailerLevel int `mapstructure:"log-level-mailer" toml:"log-level-mailer" 
json:"logMailerLevel"` LogSupport bool `scope:"server" mapstructure:"log-support" toml:"log-support" json:"logSupport"` LogSupportLevel int `scope:"server" mapstructure:"log-level-support" toml:"log-level-support" json:"logSupportLevel"` @@ -734,6 +734,8 @@ type Config struct { BackupResticPassword string `mapstructure:"backup-restic-password" toml:"backup-restic-password" json:"-"` BackupResticAws bool `mapstructure:"backup-restic-aws" toml:"backup-restic-aws" json:"backupResticAws"` BackupResticTimeout int `mapstructure:"backup-restic-timeout" toml:"backup-restic-timeout" json:"backupResticTimeout"` + BackupResticSaveQueueOnShutdown bool `mapstructure:"backup-restic-save-queue-on-shutdown" toml:"backup-restic-save-queue-on-shutdown" json:"backupResticSaveQueueOnShutdown"` + BackupResticRunQueueOnStartup bool `mapstructure:"backup-restic-run-queue-on-startup" toml:"backup-restic-run-queue-on-startup" json:"backupResticRunQueueOnStartup"` BackupStreaming bool `mapstructure:"backup-streaming" toml:"backup-streaming" json:"backupStreaming"` BackupStreamingDebug bool `mapstructure:"backup-streaming-debug" toml:"backup-streaming-debug" json:"backupStreamingDebug"` BackupStreamingAwsAccessKeyId string `mapstructure:"backup-streaming-aws-access-key-id" toml:"backup-streaming-aws-access-key-id" json:"-"` @@ -1332,7 +1334,7 @@ const ( ConstLogModGraphite = 15 ConstLogModPurge = 16 ConstLogModTask = 17 - ConstLogModArchive = 18 + ConstLogModRestic = 18 ConstLogModMailer = 19 ConstLogModSupport = 20 ConstLogModExternalScript = 21 @@ -1368,7 +1370,7 @@ const ( ConstLogNameGraphite string = "log-graphite" ConstLogNamePurge string = "log-binlog-purge" ConstLogNameTask string = "log-task" - ConstLogNameArchive string = "log-archive" + ConstLogNameRestic string = "log-restic" ConstLogNameMailer string = "log-mailer" ConstLogNameExternalScript string = "log-external-script" ConstLogNameLogSQL string = "log-sql" @@ -3271,8 +3273,8 @@ func (conf *Config) IsEligibleForPrinting(module int, level string) bool { if conf.LogExternalScript { return conf.LogExternalScriptLevel >= lvl } - case module == ConstLogModArchive: - return conf.LogArchiveLevel >= lvl + case module == ConstLogModRestic: + return conf.LogResticLevel >= lvl case module == ConstLogModMailer: return conf.LogMailerLevel >= lvl case module == ConstLogModSupport: @@ -3464,8 +3466,8 @@ func GetTagsForLog(module int) string { return "sql" case ConstLogModApp: return "app" - case ConstLogModArchive: - return "archive" + case ConstLogModRestic: + return "restic" case ConstLogModMailer: return "mailer" case ConstLogModDbErrors: @@ -3544,8 +3546,8 @@ func GetIndexFromModuleName(module string) int { return ConstLogModApp case ConstLogNameSupport: return ConstLogModSupport - case ConstLogNameArchive: - return ConstLogModArchive + case ConstLogNameRestic: + return ConstLogModRestic case ConstLogNameMailer: return ConstLogModMailer case ConstLogNameDbErrors: @@ -4223,7 +4225,7 @@ func GetKeyAliasMap() map[string]string { "log-proxy-level": "log-level-proxy", "log-graphite-level": "log-level-graphite", "log-binlog-purge-level": "log-level-binlog-purge", - "log-archive-level": "log-level-archive", + "log-restic-level": "log-level-restic", "log-mailer-level": "log-level-mailer", "log-support-level": "log-level-support", "log-external-script-level": "log-level-external-script", diff --git a/config/maps.go b/config/maps.go index 338bb96e2..43eb413a6 100644 --- a/config/maps.go +++ b/config/maps.go @@ -5,8 +5,6 @@ import ( "sync" "time" - v3 
"github.com/signal18/replication-manager/repmanv3" - "github.com/signal18/replication-manager/utils/dbhelper" "github.com/signal18/replication-manager/utils/version" ) @@ -206,387 +204,6 @@ func NewUIntsMap() *UIntsMap { return m } -type PFSQueriesMap struct { - *sync.Map -} - -func NewPFSQueriesMap() *PFSQueriesMap { - s := new(sync.Map) - m := &PFSQueriesMap{Map: s} - return m -} - -func (m *PFSQueriesMap) Get(key string) *dbhelper.PFSQuery { - if v, ok := m.Load(key); ok { - return v.(*dbhelper.PFSQuery) - } - return nil -} - -func (m *PFSQueriesMap) CheckAndGet(key string) (*dbhelper.PFSQuery, bool) { - v, ok := m.Load(key) - if ok { - return v.(*dbhelper.PFSQuery), true - } - return nil, false -} - -func (m *PFSQueriesMap) Set(key string, value *dbhelper.PFSQuery) { - m.Store(key, value) -} - -func (m *PFSQueriesMap) ToNormalMap(c map[string]*dbhelper.PFSQuery) { - // Clear the old values in the output map - for k := range c { - delete(c, k) - } - - // Insert all values from the PFSQueriesMap to the output map - m.Callback(func(key string, value *dbhelper.PFSQuery) bool { - c[key] = value - return true - }) -} - -func (m *PFSQueriesMap) ToNewMap() map[string]*dbhelper.PFSQuery { - result := make(map[string]*dbhelper.PFSQuery) - m.Range(func(k, v any) bool { - result[k.(string)] = v.(*dbhelper.PFSQuery) - return true - }) - return result -} - -func (m *PFSQueriesMap) Callback(f func(key string, value *dbhelper.PFSQuery) bool) { - m.Range(func(k, v any) bool { - return f(k.(string), v.(*dbhelper.PFSQuery)) - }) -} - -func (m *PFSQueriesMap) Clear() { - m.Range(func(key, value any) bool { - m.Delete(key.(string)) - return true - }) -} - -func FromNormalPFSMap(m *PFSQueriesMap, c map[string]dbhelper.PFSQuery) *PFSQueriesMap { - if m == nil { - m = NewPFSQueriesMap() - } else { - m.Clear() - } - - for k, v := range c { - m.Set(k, &v) - } - - return m -} - -func FromPFSQueriesMap(m *PFSQueriesMap, c *PFSQueriesMap) *PFSQueriesMap { - if m == nil { - m = NewPFSQueriesMap() - } else { - m.Clear() - } - - if c != nil { - c.Callback(func(key string, value *dbhelper.PFSQuery) bool { - m.Set(key, value) - return true - }) - } - - return m -} - -type PluginsMap struct { - *sync.Map -} - -func NewPluginsMap() *PluginsMap { - s := new(sync.Map) - m := &PluginsMap{Map: s} - return m -} - -func (m *PluginsMap) Get(key string) *dbhelper.Plugin { - if v, ok := m.Load(key); ok { - return v.(*dbhelper.Plugin) - } - return nil -} - -func (m *PluginsMap) CheckAndGet(key string) (*dbhelper.Plugin, bool) { - v, ok := m.Load(key) - if ok { - return v.(*dbhelper.Plugin), true - } - return nil, false -} - -func (m *PluginsMap) Set(key string, value *dbhelper.Plugin) { - m.Store(key, value) -} - -func (m *PluginsMap) ToNormalMap(c map[string]*dbhelper.Plugin) { - // Clear the old values in the output map - for k := range c { - delete(c, k) - } - - // Insert all values from the PluginsMap to the output map - m.Callback(func(key string, value *dbhelper.Plugin) bool { - c[key] = value - return true - }) -} - -func (m *PluginsMap) ToNewMap() map[string]*dbhelper.Plugin { - result := make(map[string]*dbhelper.Plugin) - m.Range(func(k, v any) bool { - result[k.(string)] = v.(*dbhelper.Plugin) - return true - }) - return result -} - -func (m *PluginsMap) Callback(f func(key string, value *dbhelper.Plugin) bool) { - m.Range(func(k, v any) bool { - return f(k.(string), v.(*dbhelper.Plugin)) - }) -} - -func (m *PluginsMap) Clear() { - m.Range(func(key, value any) bool { - m.Delete(key.(string)) - return true - }) -} - 
-func FromNormalPluginsMap(m *PluginsMap, c map[string]*dbhelper.Plugin) *PluginsMap { - if m == nil { - m = NewPluginsMap() - } else { - m.Clear() - } - - for k, v := range c { - m.Set(k, v) - } - - return m -} - -func FromPluginsMap(m *PluginsMap, c *PluginsMap) *PluginsMap { - if m == nil { - m = NewPluginsMap() - } else { - m.Clear() - } - - if c != nil { - c.Callback(func(key string, value *dbhelper.Plugin) bool { - m.Set(key, value) - return true - }) - } - - return m -} - -type GrantsMap struct { - *sync.Map -} - -func NewGrantsMap() *GrantsMap { - s := new(sync.Map) - m := &GrantsMap{Map: s} - return m -} - -func (m *GrantsMap) Get(key string) *dbhelper.Grant { - if v, ok := m.Load(key); ok { - return v.(*dbhelper.Grant) - } - return nil -} - -func (m *GrantsMap) CheckAndGet(key string) (*dbhelper.Grant, bool) { - v, ok := m.Load(key) - if ok { - return v.(*dbhelper.Grant), true - } - return nil, false -} - -func (m *GrantsMap) Set(key string, value *dbhelper.Grant) { - m.Store(key, value) -} - -func (m *GrantsMap) ToNormalMap(c map[string]*dbhelper.Grant) { - // Clear the old values in the output map - for k := range c { - delete(c, k) - } - - // Insert all values from the GrantsMap to the output map - m.Callback(func(key string, value *dbhelper.Grant) bool { - c[key] = value - return true - }) -} - -func (m *GrantsMap) ToNewMap() map[string]*dbhelper.Grant { - result := make(map[string]*dbhelper.Grant) - m.Range(func(k, v any) bool { - result[k.(string)] = v.(*dbhelper.Grant) - return true - }) - return result -} - -func (m *GrantsMap) Callback(f func(key string, value *dbhelper.Grant) bool) { - m.Range(func(k, v any) bool { - return f(k.(string), v.(*dbhelper.Grant)) - }) -} - -func (m *GrantsMap) Clear() { - m.Range(func(key, value any) bool { - m.Delete(key.(string)) - return true - }) -} - -func FromNormalGrantsMap(m *GrantsMap, c map[string]*dbhelper.Grant) *GrantsMap { - if m == nil { - m = NewGrantsMap() - } else { - m.Clear() - } - - for k, v := range c { - m.Set(k, v) - } - - return m -} - -func FromGrantsMap(m *GrantsMap, c *GrantsMap) *GrantsMap { - if m == nil { - m = NewGrantsMap() - } else { - m.Clear() - } - - if c != nil { - c.Callback(func(key string, value *dbhelper.Grant) bool { - m.Set(key, value) - return true - }) - } - - return m -} - -type TablesMap struct { - *sync.Map -} - -func (m *TablesMap) Get(key string) *v3.Table { - if v, ok := m.Load(key); ok { - return v.(*v3.Table) - } - return nil -} - -func (m *TablesMap) CheckAndGet(key string) (*v3.Table, bool) { - v, ok := m.Load(key) - if ok { - return v.(*v3.Table), true - } - return nil, false -} - -func (m *TablesMap) ToNormalMap(c map[string]*v3.Table) { - // clear old value - c = make(map[string]*v3.Table) - - // Insert all values to new map - m.Range(func(k any, v any) bool { - c[k.(string)] = v.(*v3.Table) - return true - }) -} - -func (m *TablesMap) ToNewMap() map[string]*v3.Table { - // clear old value - c := make(map[string]*v3.Table) - - // Insert all values to new map - m.Range(func(k any, v any) bool { - c[k.(string)] = v.(*v3.Table) - return true - }) - - return c -} - -func (m *TablesMap) Set(k string, v *v3.Table) { - m.Store(k, v) -} - -func FromNormalTablesMap(m *TablesMap, c map[string]*v3.Table) *TablesMap { - if m == nil { - m = NewTablesMap() - } else { - m.Clear() - } - - for k, v := range c { - m.Store(k, v) - } - - return m -} - -func FromTablesSyncMap(m *TablesMap, c *TablesMap) *TablesMap { - if m == nil { - m = NewTablesMap() - } else { - m.Clear() - } - - if c != nil { - 
c.Range(func(k any, v any) bool { - m.Store(k.(string), v.(*v3.Table)) - return true - }) - } - - return m -} - -func (m *TablesMap) Callback(f func(key, value any) bool) { - m.Range(f) -} - -func (m *TablesMap) Clear() { - m.Range(func(key any, value any) bool { - k := key.(string) - m.Delete(k) - return true - }) -} - -func NewTablesMap() *TablesMap { - s := new(sync.Map) - m := &TablesMap{Map: s} - return m -} - type WorkLoadsMap struct { *sync.Map } @@ -789,116 +406,6 @@ func FromTasksMap(m *TasksMap, c *TasksMap) *TasksMap { return m } -type BackupMetaMap struct { - *sync.Map -} - -func NewBackupMetaMap() *BackupMetaMap { - s := new(sync.Map) - m := &BackupMetaMap{Map: s} - return m -} - -func (m *BackupMetaMap) Get(key int64) *BackupMetadata { - if v, ok := m.Load(key); ok { - return v.(*BackupMetadata) - } - return nil -} - -func (m *BackupMetaMap) CheckAndGet(key int64) (*BackupMetadata, bool) { - v, ok := m.Load(key) - if ok { - return v.(*BackupMetadata), true - } - return nil, false -} - -func (m *BackupMetaMap) Set(key int64, value *BackupMetadata) { - m.Store(key, value) -} - -func (m *BackupMetaMap) ToNormalMap(c map[int64]*BackupMetadata) { - // Clear the old values in the output map - for k := range c { - delete(c, k) - } - - // Insert all values from the BackupMetaMap to the output map - m.Callback(func(key int64, value *BackupMetadata) bool { - c[key] = value - return true - }) -} - -func (m *BackupMetaMap) ToNewMap() map[int64]*BackupMetadata { - result := make(map[int64]*BackupMetadata) - m.Range(func(k, v any) bool { - result[k.(int64)] = v.(*BackupMetadata) - return true - }) - return result -} - -func (m *BackupMetaMap) Callback(f func(key int64, value *BackupMetadata) bool) { - m.Range(func(k, v any) bool { - return f(k.(int64), v.(*BackupMetadata)) - }) -} - -func (m *BackupMetaMap) Clear() { - m.Range(func(key, value any) bool { - m.Delete(key.(int64)) - return true - }) -} - -func FromNormalBackupMetaMap(m *BackupMetaMap, c map[int64]*BackupMetadata) *BackupMetaMap { - if m == nil { - m = NewBackupMetaMap() - } else { - m.Clear() - } - - for k, v := range c { - m.Set(k, v) - } - - return m -} - -func FromBackupMetaMap(m *BackupMetaMap, c *BackupMetaMap) *BackupMetaMap { - if m == nil { - m = NewBackupMetaMap() - } else { - m.Clear() - } - - if c != nil { - c.Callback(func(key int64, value *BackupMetadata) bool { - m.Set(key, value) - return true - }) - } - - return m -} - -// GetBackupsByToolAndSource retrieves backups with the same backupTool and source. 
-func (b *BackupMetaMap) GetPreviousBackup(backupTool string, source string) *BackupMetadata { - var result *BackupMetadata - b.Map.Range(func(key, value interface{}) bool { - if backup, ok := value.(*BackupMetadata); ok { - if backup.BackupTool == backupTool && backup.Source == source { - result = backup - return false - } - } - return true - }) - return result -} - type VersionsMap struct { *sync.Map } diff --git a/docs/docs.go b/docs/docs.go index 358f7c726..6eb91eeef 100644 --- a/docs/docs.go +++ b/docs/docs.go @@ -4241,7 +4241,7 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/archives": { + "/api/clusters/{clusterName}/backups": { "get": { "description": "This endpoint retrieves the backups for the specified cluster.", "produces": [ @@ -4275,7 +4275,9 @@ const docTemplate = `{ "type": "array", "items": { "type": "object", - "additionalProperties": true + "additionalProperties": { + "$ref": "#/definitions/backupmgr.BackupMetadata" + } } } }, @@ -4294,16 +4296,16 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/archives/fetch": { - "post": { - "description": "Fetches the restic backup for the specified cluster.", + "/api/clusters/{clusterName}/backups/stats": { + "get": { + "description": "This endpoint retrieves the backup stats for the specified cluster.", "produces": [ "application/json" ], "tags": [ "ClusterBackups" ], - "summary": "Fetch Archives", + "summary": "Retrieve backup stats for a specific cluster", "parameters": [ { "type": "string", @@ -4323,9 +4325,12 @@ const docTemplate = `{ ], "responses": { "200": { - "description": "Archives fetch queued", + "description": "List of backups", "schema": { - "type": "string" + "type": "array", + "items": { + "$ref": "#/definitions/backupmgr.BackupStat" + } } }, "403": { @@ -4343,16 +4348,16 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/archives/init": { - "post": { - "description": "Inits the restic backup for the specified cluster.", + "/api/clusters/{clusterName}/certificates": { + "get": { + "description": "This endpoint retrieves the client certificates for the specified cluster.", "produces": [ "application/json" ], "tags": [ - "ClusterBackups" + "ClusterCertificates" ], - "summary": "Init Restic Backup", + "summary": "Retrieve client certificates for a specific cluster", "parameters": [ { "type": "string", @@ -4372,19 +4377,17 @@ const docTemplate = `{ ], "responses": { "200": { - "description": "Archives purge queued", - "schema": { - "type": "string" - } - }, - "403": { - "description": "No valid ACL", + "description": "List of client certificates", "schema": { - "type": "string" + "type": "array", + "items": { + "type": "object", + "additionalProperties": true + } } }, "500": { - "description": "No cluster", + "description": "Internal Server Error", "schema": { "type": "string" } @@ -4392,16 +4395,16 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/archives/init/{force}": { - "post": { - "description": "Inits the restic backup for the specified cluster.", + "/api/clusters/{clusterName}/diffvariables": { + "get": { + "description": "This endpoint retrieves the variable differences for the specified cluster.", "produces": [ "application/json" ], "tags": [ - "ClusterBackups" + "Cluster" ], - "summary": "Init Restic Backup", + "summary": "Retrieve variable differences for a specific cluster", "parameters": [ { "type": "string", @@ -4417,22 +4420,16 @@ const docTemplate = `{ "name": "clusterName", "in": "path", "required": true - }, - { - "enum": [ - "force" - ], - 
"type": "string", - "description": "Force init", - "name": "force", - "in": "path" } ], "responses": { "200": { - "description": "Archives purge queued", + "description": "List of variable differences", "schema": { - "type": "string" + "type": "array", + "items": { + "$ref": "#/definitions/cluster.VariableDiff" + } } }, "403": { @@ -4450,16 +4447,19 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/archives/purge": { + "/api/clusters/{clusterName}/docker/actions/registry-connect": { "post": { - "description": "Purges the restic backup for the specified cluster.", + "description": "Logs in to a Docker registry using the provided credentials.", + "consumes": [ + "application/json" + ], "produces": [ "application/json" ], "tags": [ - "ClusterBackups" + "Docker" ], - "summary": "Purge Restic Backup", + "summary": "Docker Registry Login", "parameters": [ { "type": "string", @@ -4475,11 +4475,26 @@ const docTemplate = `{ "name": "clusterName", "in": "path", "required": true + }, + { + "description": "Docker Registry Login Form", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/server.DockerRegistryLoginForm" + } } ], "responses": { "200": { - "description": "Archives purge queued", + "description": "Docker registry login successful", + "schema": { + "type": "string" + } + }, + "400": { + "description": "Error decoding request body", "schema": { "type": "string" } @@ -4491,7 +4506,7 @@ const docTemplate = `{ } }, "500": { - "description": "No cluster", + "description": "Error creating request\" or \"Error making request to Docker registry\" or \"Docker registry login failed", "schema": { "type": "string" } @@ -4499,16 +4514,19 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/archives/restore-config/{force}": { - "post": { - "description": "Restores the restic config for the specified cluster.", + "/api/clusters/{clusterName}/docker/browse/{imageRef}": { + "get": { + "description": "Lists files in a specified directory of a Docker image.", + "consumes": [ + "application/json" + ], "produces": [ "application/json" ], "tags": [ - "ClusterBackups" + "Docker" ], - "summary": "Restore Restic Config", + "summary": "List Files in Docker Image Directory", "parameters": [ { "type": "string", @@ -4527,16 +4545,21 @@ const docTemplate = `{ }, { "type": "string", - "default": "noforce", - "description": "Force Restore", - "name": "force", + "description": "Docker Image Reference", + "name": "imageRef", "in": "path", "required": true } ], "responses": { "200": { - "description": "Archives restore config done", + "description": "List of files in the directory", + "schema": { + "$ref": "#/definitions/treehelper.FileTreeCache" + } + }, + "400": { + "description": "Image reference or source directory not provided", "schema": { "type": "string" } @@ -4548,7 +4571,7 @@ const docTemplate = `{ } }, "500": { - "description": "No cluster", + "description": "Error listing files in image directory\" or \"Error encoding JSON", "schema": { "type": "string" } @@ -4556,16 +4579,19 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/archives/stats": { - "get": { - "description": "This endpoint retrieves the backup stats for the specified cluster.", + "/api/clusters/{clusterName}/ext-role/accept": { + "post": { + "description": "This endpoint accepts external operations for the specified cluster.", + "consumes": [ + "application/json" + ], "produces": [ "application/json" ], "tags": [ - "ClusterBackups" + "Cloud18" ], - "summary": "Retrieve 
backup stats for a specific cluster", + "summary": "Accept external operations for a specific cluster", "parameters": [ { "type": "string", @@ -4581,16 +4607,22 @@ const docTemplate = `{ "name": "clusterName", "in": "path", "required": true + }, + { + "description": "User Form", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/server.CloudUserForm" + } } ], "responses": { "200": { - "description": "List of backups", + "description": "Email sent to sponsor!", "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/archiver.BackupStat" - } + "type": "string" } }, "403": { @@ -4600,7 +4632,7 @@ const docTemplate = `{ } }, "500": { - "description": "No cluster", + "description": "Error accepting subscription", "schema": { "type": "string" } @@ -4608,16 +4640,19 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/archives/task-queue": { - "get": { - "description": "Gets the restic task queue for the specified cluster.", + "/api/clusters/{clusterName}/ext-role/quote": { + "post": { + "description": "This endpoint quotes external operations for the specified cluster.", + "consumes": [ + "application/json" + ], "produces": [ "application/json" ], "tags": [ - "ClusterBackups" + "Cloud18" ], - "summary": "Get Archives Task Queue", + "summary": "Quote external operations for a specific cluster", "parameters": [ { "type": "string", @@ -4633,16 +4668,22 @@ const docTemplate = `{ "name": "clusterName", "in": "path", "required": true + }, + { + "description": "User Form", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/server.CloudUserForm" + } } ], "responses": { "200": { - "description": "Task queue fetched", + "description": "Email sent to sponsor!", "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/archiver.ResticTask" - } + "type": "string" } }, "403": { @@ -4652,7 +4693,7 @@ const docTemplate = `{ } }, "500": { - "description": "No cluster", + "description": "Error accepting external operations", "schema": { "type": "string" } @@ -4660,16 +4701,19 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/archives/task-queue/reset": { - "get": { - "description": "Empty the restic task queue for the specified cluster.", + "/api/clusters/{clusterName}/ext-role/refuse": { + "post": { + "description": "This endpoint rejects external operations for the specified cluster.", + "consumes": [ + "application/json" + ], "produces": [ "application/json" ], "tags": [ - "ClusterBackups" + "Cloud18" ], - "summary": "Reset Archives Task Queue", + "summary": "Reject external operations for a specific cluster", "parameters": [ { "type": "string", @@ -4685,11 +4729,20 @@ const docTemplate = `{ "name": "clusterName", "in": "path", "required": true + }, + { + "description": "User Form", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/server.CloudUserForm" + } } ], "responses": { "200": { - "description": "Task queue reset", + "description": "Subscription removed!", "schema": { "type": "string" } @@ -4701,7 +4754,7 @@ const docTemplate = `{ } }, "500": { - "description": "No cluster", + "description": "Error removing subscription", "schema": { "type": "string" } @@ -4709,16 +4762,19 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/archives/unlock": { + "/api/clusters/{clusterName}/ext-role/subscribe": { "post": { - "description": "Unlocks the restic backup for the specified cluster.", + "description": "This endpoint 
subscribes external operations for the specified cluster.", + "consumes": [ + "application/json" + ], "produces": [ "application/json" ], "tags": [ - "ClusterBackups" + "Cloud18" ], - "summary": "Unlock Restic Backup", + "summary": "subscribe external operations for a specific cluster", "parameters": [ { "type": "string", @@ -4734,11 +4790,20 @@ const docTemplate = `{ "name": "clusterName", "in": "path", "required": true + }, + { + "description": "User Form", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/server.CloudUserForm" + } } ], "responses": { "200": { - "description": "Archives purge queued", + "description": "Email sent to sponsor!", "schema": { "type": "string" } @@ -4750,7 +4815,7 @@ const docTemplate = `{ } }, "500": { - "description": "No cluster", + "description": "Error subscribing external operations", "schema": { "type": "string" } @@ -4758,16 +4823,16 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/backups": { + "/api/clusters/{clusterName}/graphite-filterlist": { "get": { - "description": "This endpoint retrieves the backups for the specified cluster.", + "description": "This endpoint retrieves the Graphite filter list for the specified cluster.", "produces": [ "application/json" ], "tags": [ - "ClusterBackups" + "ClusterGraphite" ], - "summary": "Retrieve backups for a specific cluster", + "summary": "Retrieve Graphite filter list for a specific cluster", "parameters": [ { "type": "string", @@ -4787,23 +4852,16 @@ const docTemplate = `{ ], "responses": { "200": { - "description": "List of backups", + "description": "List of Graphite filters", "schema": { "type": "array", "items": { - "type": "object", - "additionalProperties": true + "type": "string" } } }, - "403": { - "description": "No valid ACL", - "schema": { - "type": "string" - } - }, "500": { - "description": "No cluster", + "description": "Internal Server Error", "schema": { "type": "string" } @@ -4811,16 +4869,16 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/backups/stats": { + "/api/clusters/{clusterName}/health": { "get": { - "description": "This endpoint retrieves the backup stats for the specified cluster.", + "description": "Get the health status of the specified cluster.", "produces": [ "application/json" ], "tags": [ - "ClusterBackups" + "ClusterHealth" ], - "summary": "Retrieve backup stats for a specific cluster", + "summary": "Get Cluster Health", "parameters": [ { "type": "string", @@ -4840,12 +4898,9 @@ const docTemplate = `{ ], "responses": { "200": { - "description": "List of backups", + "description": "Cluster health fetched", "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/archiver.BackupStat" - } + "$ref": "#/definitions/peer.PeerHealth" } }, "403": { @@ -4863,16 +4918,16 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/certificates": { + "/api/clusters/{clusterName}/is-in-errstate/{errstate}": { "get": { - "description": "This endpoint retrieves the client certificates for the specified cluster.", + "description": "Checks if the specified cluster is in an error state.", "produces": [ "application/json" ], "tags": [ - "ClusterCertificates" + "ClusterHealth" ], - "summary": "Retrieve client certificates for a specific cluster", + "summary": "Check if Cluster is in Error State", "parameters": [ { "type": "string", @@ -4888,21 +4943,30 @@ const docTemplate = `{ "name": "clusterName", "in": "path", "required": true - } + }, + { + "type": "string", + "description": "State to check", + 
"name": "state", + "in": "path", + "required": true + } ], "responses": { "200": { - "description": "List of client certificates", + "description": "true\" or \"false", "schema": { - "type": "array", - "items": { - "type": "object", - "additionalProperties": true - } + "type": "string" + } + }, + "403": { + "description": "No valid ACL", + "schema": { + "type": "string" } }, "500": { - "description": "Internal Server Error", + "description": "No cluster", "schema": { "type": "string" } @@ -4910,16 +4974,16 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/diffvariables": { + "/api/clusters/{clusterName}/jobs": { "get": { - "description": "This endpoint retrieves the variable differences for the specified cluster.", + "description": "This endpoint retrieves the job entries for the specified cluster.", "produces": [ "application/json" ], "tags": [ "Cluster" ], - "summary": "Retrieve variable differences for a specific cluster", + "summary": "Retrieve job entries for a specific cluster", "parameters": [ { "type": "string", @@ -4939,11 +5003,12 @@ const docTemplate = `{ ], "responses": { "200": { - "description": "List of variable differences", + "description": "List of job entries", "schema": { "type": "array", "items": { - "$ref": "#/definitions/cluster.VariableDiff" + "type": "object", + "additionalProperties": true } } }, @@ -4954,7 +5019,7 @@ const docTemplate = `{ } }, "500": { - "description": "No cluster", + "description": "Cluster Not Found", "schema": { "type": "string" } @@ -4962,66 +5027,118 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/docker/actions/registry-connect": { - "post": { - "description": "Logs in to a Docker registry using the provided credentials.", - "consumes": [ - "application/json" - ], + "/api/clusters/{clusterName}/jobs-log-level/{task}/{level}": { + "get": { + "description": "Checks if a specific log level is enabled for a given task in the specified cluster.", "produces": [ "application/json" ], "tags": [ - "Docker" + "ClusterLogging" ], - "summary": "Docker Registry Login", + "summary": "Check Cluster Log Level", "parameters": [ { "type": "string", - "default": "Bearer \u003cAdd access token here\u003e", - "description": "Insert your access token", - "name": "Authorization", - "in": "header", + "description": "Cluster Name", + "name": "clusterName", + "in": "path", "required": true }, { + "enum": [ + "xtrabackup", + "mariabackup", + "errorlog", + "slowquery", + "sqlerrorlog", + "auditlog", + "zfssnapback", + "optimize", + "reseedxtrabackup", + "reseedmariabackup", + "reseedmysqldump", + "flashbackxtrabackup", + "flashbackmariadbackup", + "flashbackmysqldump", + "stop", + "restart", + "start", + "printdefault-current", + "printdefault-dummy", + "jobs-check", + "jobs-upgrade" + ], "type": "string", - "description": "Cluster Name", - "name": "clusterName", + "description": "Task Name", + "name": "task", "in": "path", "required": true }, { - "description": "Docker Registry Login Form", - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/server.DockerRegistryLoginForm" - } + "enum": [ + "ERROR", + "WARN", + "INFO", + "DEBUG" + ], + "type": "string", + "description": "Log Level", + "name": "level", + "in": "path", + "required": true } ], "responses": { "200": { - "description": "Docker registry login successful", + "description": "true\" or \"false", "schema": { "type": "string" } }, - "400": { - "description": "Error decoding request body", + "403": { + "description": "No valid ACL", 
"schema": { "type": "string" } }, - "403": { - "description": "No valid ACL", + "500": { + "description": "No cluster", + "schema": { + "type": "string" + } + } + } + } + }, + "/api/clusters/{clusterName}/need-rolling-reprov": { + "get": { + "description": "Checks if a specified cluster needs a rolling reprovision.", + "produces": [ + "text/plain" + ], + "tags": [ + "Database" + ], + "summary": "Check if a cluster needs a rolling reprovision", + "parameters": [ + { + "type": "string", + "description": "Cluster Name", + "name": "clusterName", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "200 -Need rolling reprov!", "schema": { "type": "string" } }, "500": { - "description": "Error creating request\" or \"Error making request to Docker registry\" or \"Docker registry login failed", + "description": "503 -No rolling reprov needed!\" or \"500 -No cluster", "schema": { "type": "string" } @@ -5029,19 +5146,51 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/docker/browse/{imageRef}": { + "/api/clusters/{clusterName}/need-rolling-restart": { "get": { - "description": "Lists files in a specified directory of a Docker image.", - "consumes": [ - "application/json" + "description": "Checks if a specified cluster needs a rolling restart.", + "produces": [ + "text/plain" + ], + "tags": [ + "Database" + ], + "summary": "Check if a cluster needs a rolling restart", + "parameters": [ + { + "type": "string", + "description": "Cluster Name", + "name": "clusterName", + "in": "path", + "required": true + } ], + "responses": { + "200": { + "description": "200 -Need rolling restart!", + "schema": { + "type": "string" + } + }, + "500": { + "description": "503 -No rolling restart needed!\" or \"500 -No cluster", + "schema": { + "type": "string" + } + } + } + } + }, + "/api/clusters/{clusterName}/opensvc-gateway": { + "get": { + "description": "Retrieves the gateway nodes of the specified cluster.", "produces": [ "application/json" ], "tags": [ - "Docker" + "ClusterGateway" ], - "summary": "List Files in Docker Image Directory", + "summary": "Get Cluster Gateway Nodes", "parameters": [ { "type": "string", @@ -5057,26 +5206,16 @@ const docTemplate = `{ "name": "clusterName", "in": "path", "required": true - }, - { - "type": "string", - "description": "Docker Image Reference", - "name": "imageRef", - "in": "path", - "required": true } ], "responses": { "200": { - "description": "List of files in the directory", - "schema": { - "$ref": "#/definitions/treehelper.FileTreeCache" - } - }, - "400": { - "description": "Image reference or source directory not provided", + "description": "List of gateway nodes", "schema": { - "type": "string" + "type": "array", + "items": { + "type": "string" + } } }, "403": { @@ -5086,7 +5225,7 @@ const docTemplate = `{ } }, "500": { - "description": "Error listing files in image directory\" or \"Error encoding JSON", + "description": "No cluster\" or \"Error getting gateway nodes", "schema": { "type": "string" } @@ -5094,19 +5233,16 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/ext-role/accept": { - "post": { - "description": "This endpoint accepts external operations for the specified cluster.", - "consumes": [ - "application/json" - ], + "/api/clusters/{clusterName}/opensvc-stats": { + "get": { + "description": "Retrieves the OpenSVC daemon status of the specified cluster.", "produces": [ "application/json" ], "tags": [ - "Cloud18" + "ClusterGateway" ], - "summary": "Accept external operations for a specific 
cluster", + "summary": "Get OpenSVC Daemon Status", "parameters": [ { "type": "string", @@ -5122,22 +5258,16 @@ const docTemplate = `{ "name": "clusterName", "in": "path", "required": true - }, - { - "description": "User Form", - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/server.CloudUserForm" - } } ], "responses": { "200": { - "description": "Email sent to sponsor!", + "description": "OpenSVC daemon status fetched", "schema": { - "type": "string" + "type": "array", + "items": { + "$ref": "#/definitions/opensvc.DaemonNodeStats" + } } }, "403": { @@ -5147,7 +5277,7 @@ const docTemplate = `{ } }, "500": { - "description": "Error accepting subscription", + "description": "No cluster\" or \"Error getting OpenSVC stats", "schema": { "type": "string" } @@ -5155,9 +5285,49 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/ext-role/quote": { - "post": { - "description": "This endpoint quotes external operations for the specified cluster.", + "/api/clusters/{clusterName}/proxies/{proxyName}": { + "get": { + "description": "Shows the proxies for that specific named cluster", + "tags": [ + "Proxies" + ], + "summary": "Shows the proxies for that specific named cluster", + "parameters": [ + { + "type": "string", + "default": "Bearer \u003cAdd access token here\u003e", + "description": "Insert your access token", + "name": "Authorization", + "in": "header", + "required": true + }, + { + "type": "string", + "description": "Cluster Name", + "name": "clusterName", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Server details retrieved successfully", + "schema": { + "$ref": "#/definitions/cluster.Proxy" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "type": "string" + } + } + } + } + }, + "/api/clusters/{clusterName}/proxies/{proxyName}/actions/need-reprov": { + "get": { + "description": "Check if the proxy service for a given cluster and proxy needs reprovisioning", "consumes": [ "application/json" ], @@ -5165,9 +5335,9 @@ const docTemplate = `{ "application/json" ], "tags": [ - "Cloud18" + "Proxies" ], - "summary": "Quote external operations for a specific cluster", + "summary": "Check if Proxy Needs Reprovision", "parameters": [ { "type": "string", @@ -5185,18 +5355,16 @@ const docTemplate = `{ "required": true }, { - "description": "User Form", - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/server.CloudUserForm" - } + "type": "string", + "description": "Proxy Name", + "name": "proxyName", + "in": "path", + "required": true } ], "responses": { "200": { - "description": "Email sent to sponsor!", + "description": "Need reprov!", "schema": { "type": "string" } @@ -5208,17 +5376,23 @@ const docTemplate = `{ } }, "500": { - "description": "Error accepting external operations", + "description": "No cluster", + "schema": { + "type": "string" + } + }, + "503": { + "description": "No reprov needed!\" \"Not a Valid Server!", "schema": { "type": "string" } } } } - }, - "/api/clusters/{clusterName}/ext-role/refuse": { - "post": { - "description": "This endpoint rejects external operations for the specified cluster.", + }, + "/api/clusters/{clusterName}/proxies/{proxyName}/actions/need-restart": { + "get": { + "description": "Check if the proxy service for a given cluster and proxy needs a restart", "consumes": [ "application/json" ], @@ -5226,9 +5400,9 @@ const docTemplate = `{ "application/json" ], "tags": [ - "Cloud18" + "Proxies" ], - 
"summary": "Reject external operations for a specific cluster", + "summary": "Check if Proxy Needs Restart", "parameters": [ { "type": "string", @@ -5246,18 +5420,16 @@ const docTemplate = `{ "required": true }, { - "description": "User Form", - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/server.CloudUserForm" - } + "type": "string", + "description": "Proxy Name", + "name": "proxyName", + "in": "path", + "required": true } ], "responses": { "200": { - "description": "Subscription removed!", + "description": "Need restart!", "schema": { "type": "string" } @@ -5269,7 +5441,13 @@ const docTemplate = `{ } }, "500": { - "description": "Error removing subscription", + "description": "No cluster", + "schema": { + "type": "string" + } + }, + "503": { + "description": "No restart needed!\" \"Not a Valid Server!", "schema": { "type": "string" } @@ -5277,9 +5455,9 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/ext-role/subscribe": { + "/api/clusters/{clusterName}/proxies/{proxyName}/actions/provision": { "post": { - "description": "This endpoint subscribes external operations for the specified cluster.", + "description": "Provision the proxy service for a given cluster and proxy", "consumes": [ "application/json" ], @@ -5287,9 +5465,9 @@ const docTemplate = `{ "application/json" ], "tags": [ - "Cloud18" + "Proxies" ], - "summary": "subscribe external operations for a specific cluster", + "summary": "Provision Proxy Service", "parameters": [ { "type": "string", @@ -5307,18 +5485,16 @@ const docTemplate = `{ "required": true }, { - "description": "User Form", - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/server.CloudUserForm" - } + "type": "string", + "description": "Proxy Name", + "name": "proxyName", + "in": "path", + "required": true } ], "responses": { "200": { - "description": "Email sent to sponsor!", + "description": "Proxy Service Provisioned", "schema": { "type": "string" } @@ -5330,7 +5506,7 @@ const docTemplate = `{ } }, "500": { - "description": "Error subscribing external operations", + "description": "Cluster Not Found\" \"Server Not Found", "schema": { "type": "string" } @@ -5338,16 +5514,19 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/graphite-filterlist": { - "get": { - "description": "This endpoint retrieves the Graphite filter list for the specified cluster.", + "/api/clusters/{clusterName}/proxies/{proxyName}/actions/staging/{isStaging}": { + "post": { + "description": "Set the proxy service for a given cluster and proxy to staging", + "consumes": [ + "application/json" + ], "produces": [ "application/json" ], "tags": [ - "ClusterGraphite" + "Proxies" ], - "summary": "Retrieve Graphite filter list for a specific cluster", + "summary": "Set Staging", "parameters": [ { "type": "string", @@ -5363,20 +5542,43 @@ const docTemplate = `{ "name": "clusterName", "in": "path", "required": true + }, + { + "type": "string", + "description": "Proxy Name", + "name": "proxyName", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Is Staging", + "name": "isStaging", + "in": "path", + "required": true } ], "responses": { "200": { - "description": "List of Graphite filters", + "description": "Proxy Service Set to Staging", "schema": { - "type": "array", - "items": { - "type": "string" - } + "type": "string" + } + }, + "403": { + "description": "No valid ACL", + "schema": { + "type": "string" } }, "500": { - "description": "Internal Server 
Error", + "description": "Cluster Not Found\" \"Server Not Found", + "schema": { + "type": "string" + } + }, + "503": { + "description": "Not a Valid Server!", "schema": { "type": "string" } @@ -5384,16 +5586,19 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/health": { - "get": { - "description": "Get the health status of the specified cluster.", + "/api/clusters/{clusterName}/proxies/{proxyName}/actions/start": { + "post": { + "description": "Start the proxy service for a given cluster and proxy", + "consumes": [ + "application/json" + ], "produces": [ "application/json" ], "tags": [ - "ClusterHealth" + "Proxies" ], - "summary": "Get Cluster Health", + "summary": "Start Proxy Service", "parameters": [ { "type": "string", @@ -5409,13 +5614,20 @@ const docTemplate = `{ "name": "clusterName", "in": "path", "required": true + }, + { + "type": "string", + "description": "Proxy Name", + "name": "proxyName", + "in": "path", + "required": true } ], "responses": { "200": { - "description": "Cluster health fetched", + "description": "Proxy Service Started", "schema": { - "$ref": "#/definitions/peer.PeerHealth" + "type": "string" } }, "403": { @@ -5425,7 +5637,7 @@ const docTemplate = `{ } }, "500": { - "description": "No cluster", + "description": "Cluster Not Found\" \"Server Not Found", "schema": { "type": "string" } @@ -5433,16 +5645,19 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/is-in-errstate/{errstate}": { - "get": { - "description": "Checks if the specified cluster is in an error state.", + "/api/clusters/{clusterName}/proxies/{proxyName}/actions/stop": { + "post": { + "description": "Stop the proxy service for a given cluster and proxy", + "consumes": [ + "application/json" + ], "produces": [ "application/json" ], "tags": [ - "ClusterHealth" + "Proxies" ], - "summary": "Check if Cluster is in Error State", + "summary": "Stop Proxy Service", "parameters": [ { "type": "string", @@ -5461,15 +5676,15 @@ const docTemplate = `{ }, { "type": "string", - "description": "State to check", - "name": "state", + "description": "Proxy Name", + "name": "proxyName", "in": "path", "required": true } ], "responses": { "200": { - "description": "true\" or \"false", + "description": "Proxy Service Stopped", "schema": { "type": "string" } @@ -5481,7 +5696,7 @@ const docTemplate = `{ } }, "500": { - "description": "No cluster", + "description": "Cluster Not Found\" \"Server Not Found", "schema": { "type": "string" } @@ -5489,16 +5704,19 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/jobs": { - "get": { - "description": "This endpoint retrieves the job entries for the specified cluster.", + "/api/clusters/{clusterName}/proxies/{proxyName}/actions/unprovision": { + "post": { + "description": "Unprovision the proxy service for a given cluster and proxy", + "consumes": [ + "application/json" + ], "produces": [ "application/json" ], "tags": [ - "Cluster" + "Proxies" ], - "summary": "Retrieve job entries for a specific cluster", + "summary": "Unprovision Proxy Service", "parameters": [ { "type": "string", @@ -5514,17 +5732,20 @@ const docTemplate = `{ "name": "clusterName", "in": "path", "required": true + }, + { + "type": "string", + "description": "Proxy Name", + "name": "proxyName", + "in": "path", + "required": true } ], "responses": { "200": { - "description": "List of job entries", + "description": "Proxy Service Unprovisioned", "schema": { - "type": "array", - "items": { - "type": "object", - "additionalProperties": true - } + "type": "string" } }, "403": { 
@@ -5534,7 +5755,7 @@ const docTemplate = `{ } }, "500": { - "description": "Cluster Not Found", + "description": "Cluster Not Found\" \"Server Not Found", "schema": { "type": "string" } @@ -5542,73 +5763,42 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/jobs-log-level/{task}/{level}": { + "/api/clusters/{clusterName}/queryrules": { "get": { - "description": "Checks if a specific log level is enabled for a given task in the specified cluster.", + "description": "This endpoint retrieves the query rules for the specified cluster.", "produces": [ "application/json" ], "tags": [ - "ClusterLogging" + "Cluster" ], - "summary": "Check Cluster Log Level", + "summary": "Retrieve query rules for a specific cluster", "parameters": [ { "type": "string", - "description": "Cluster Name", - "name": "clusterName", - "in": "path", - "required": true - }, - { - "enum": [ - "xtrabackup", - "mariabackup", - "errorlog", - "slowquery", - "sqlerrorlog", - "auditlog", - "zfssnapback", - "optimize", - "reseedxtrabackup", - "reseedmariabackup", - "reseedmysqldump", - "flashbackxtrabackup", - "flashbackmariadbackup", - "flashbackmysqldump", - "stop", - "restart", - "start", - "printdefault-current", - "printdefault-dummy", - "jobs-check", - "jobs-upgrade" - ], - "type": "string", - "description": "Task Name", - "name": "task", - "in": "path", + "default": "Bearer \u003cAdd access token here\u003e", + "description": "Insert your access token", + "name": "Authorization", + "in": "header", "required": true }, { - "enum": [ - "ERROR", - "WARN", - "INFO", - "DEBUG" - ], "type": "string", - "description": "Log Level", - "name": "level", + "description": "Cluster Name", + "name": "clusterName", "in": "path", "required": true } ], "responses": { "200": { - "description": "true\" or \"false", + "description": "List of query rules", "schema": { - "type": "string" + "type": "array", + "items": { + "type": "object", + "additionalProperties": true + } } }, "403": { @@ -5626,17 +5816,25 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/need-rolling-reprov": { - "get": { - "description": "Checks if a specified cluster needs a rolling reprovision.", + "/api/clusters/{clusterName}/restic/fetch": { + "post": { + "description": "Fetches the restic backup for the specified cluster.", "produces": [ - "text/plain" + "application/json" ], "tags": [ - "Database" + "ClusterRestic" ], - "summary": "Check if a cluster needs a rolling reprovision", + "summary": "Fetch Restic Snapshots", "parameters": [ + { + "type": "string", + "default": "Bearer \u003cAdd access token here\u003e", + "description": "Insert your access token", + "name": "Authorization", + "in": "header", + "required": true + }, { "type": "string", "description": "Cluster Name", @@ -5647,13 +5845,19 @@ const docTemplate = `{ ], "responses": { "200": { - "description": "200 -Need rolling reprov!", + "description": "Restic snapshots fetch queued", + "schema": { + "type": "string" + } + }, + "403": { + "description": "No valid ACL", "schema": { "type": "string" } }, "500": { - "description": "503 -No rolling reprov needed!\" or \"500 -No cluster", + "description": "No cluster", "schema": { "type": "string" } @@ -5661,17 +5865,25 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/need-rolling-restart": { - "get": { - "description": "Checks if a specified cluster needs a rolling restart.", + "/api/clusters/{clusterName}/restic/init": { + "post": { + "description": "Inits the restic repository for the specified cluster.", "produces": [ - 
"text/plain" + "application/json" ], "tags": [ - "Database" + "ClusterRestic" ], - "summary": "Check if a cluster needs a rolling restart", + "summary": "Init Restic Repository", "parameters": [ + { + "type": "string", + "default": "Bearer \u003cAdd access token here\u003e", + "description": "Insert your access token", + "name": "Authorization", + "in": "header", + "required": true + }, { "type": "string", "description": "Cluster Name", @@ -5682,13 +5894,19 @@ const docTemplate = `{ ], "responses": { "200": { - "description": "200 -Need rolling restart!", + "description": "Restic repository initialized", + "schema": { + "type": "string" + } + }, + "403": { + "description": "No valid ACL", "schema": { "type": "string" } }, "500": { - "description": "503 -No rolling restart needed!\" or \"500 -No cluster", + "description": "No cluster", "schema": { "type": "string" } @@ -5696,16 +5914,16 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/opensvc-gateway": { - "get": { - "description": "Retrieves the gateway nodes of the specified cluster.", + "/api/clusters/{clusterName}/restic/init/{force}": { + "post": { + "description": "Inits the restic repository for the specified cluster.", "produces": [ "application/json" ], "tags": [ - "ClusterGateway" + "ClusterRestic" ], - "summary": "Get Cluster Gateway Nodes", + "summary": "Init Restic Repository", "parameters": [ { "type": "string", @@ -5721,16 +5939,22 @@ const docTemplate = `{ "name": "clusterName", "in": "path", "required": true + }, + { + "enum": [ + "force" + ], + "type": "string", + "description": "Force init", + "name": "force", + "in": "path" } ], "responses": { "200": { - "description": "List of gateway nodes", + "description": "Restic repository initialized", "schema": { - "type": "array", - "items": { - "type": "string" - } + "type": "string" } }, "403": { @@ -5740,7 +5964,7 @@ const docTemplate = `{ } }, "500": { - "description": "No cluster\" or \"Error getting gateway nodes", + "description": "No cluster", "schema": { "type": "string" } @@ -5748,16 +5972,16 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/opensvc-stats": { - "get": { - "description": "Retrieves the OpenSVC daemon status of the specified cluster.", + "/api/clusters/{clusterName}/restic/purge/{snapshotID}": { + "post": { + "description": "Purges the restic backup for the specified cluster.", "produces": [ "application/json" ], "tags": [ - "ClusterGateway" + "ClusterRestic" ], - "summary": "Get OpenSVC Daemon Status", + "summary": "Purge Restic Backup", "parameters": [ { "type": "string", @@ -5773,16 +5997,20 @@ const docTemplate = `{ "name": "clusterName", "in": "path", "required": true + }, + { + "type": "string", + "description": "Snapshot ID", + "name": "snapshotID", + "in": "path", + "required": true } ], "responses": { "200": { - "description": "OpenSVC daemon status fetched", + "description": "Restic repository purged", "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/opensvc.DaemonNodeStats" - } + "type": "string" } }, "403": { @@ -5792,7 +6020,7 @@ const docTemplate = `{ } }, "500": { - "description": "No cluster\" or \"Error getting OpenSVC stats", + "description": "No cluster", "schema": { "type": "string" } @@ -5800,13 +6028,16 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/proxies/{proxyName}": { - "get": { - "description": "Shows the proxies for that specific named cluster", + "/api/clusters/{clusterName}/restic/restore-config/{force}": { + "post": { + "description": "Restores the restic 
config for the specified cluster.", + "produces": [ + "application/json" + ], "tags": [ - "Proxies" + "ClusterRestic" ], - "summary": "Shows the proxies for that specific named cluster", + "summary": "Restore Restic Config", "parameters": [ { "type": "string", @@ -5822,17 +6053,31 @@ const docTemplate = `{ "name": "clusterName", "in": "path", "required": true + }, + { + "type": "string", + "default": "noforce", + "description": "Force Restore", + "name": "force", + "in": "path", + "required": true } ], "responses": { "200": { - "description": "Server details retrieved successfully", + "description": "Restic config restore done", "schema": { - "$ref": "#/definitions/cluster.Proxy" + "type": "string" + } + }, + "403": { + "description": "No valid ACL", + "schema": { + "type": "string" } }, "500": { - "description": "Internal Server Error", + "description": "No cluster", "schema": { "type": "string" } @@ -5840,19 +6085,16 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/proxies/{proxyName}/actions/need-reprov": { + "/api/clusters/{clusterName}/restic/snapshots": { "get": { - "description": "Check if the proxy service for a given cluster and proxy needs reprovisioning", - "consumes": [ - "application/json" - ], + "description": "This endpoint retrieves the backups for the specified cluster.", "produces": [ "application/json" ], "tags": [ - "Proxies" + "ClusterRestic" ], - "summary": "Check if Proxy Needs Reprovision", + "summary": "Retrieve backups for a specific cluster", "parameters": [ { "type": "string", @@ -5868,20 +6110,17 @@ const docTemplate = `{ "name": "clusterName", "in": "path", "required": true - }, - { - "type": "string", - "description": "Proxy Name", - "name": "proxyName", - "in": "path", - "required": true } ], "responses": { "200": { - "description": "Need reprov!", + "description": "List of backups", "schema": { - "type": "string" + "type": "array", + "items": { + "type": "object", + "additionalProperties": true + } } }, "403": { @@ -5895,29 +6134,20 @@ const docTemplate = `{ "schema": { "type": "string" } - }, - "503": { - "description": "No reprov needed!\" \"Not a Valid Server!", - "schema": { - "type": "string" - } } } } }, - "/api/clusters/{clusterName}/proxies/{proxyName}/actions/need-restart": { + "/api/clusters/{clusterName}/restic/stats": { "get": { - "description": "Check if the proxy service for a given cluster and proxy needs a restart", - "consumes": [ - "application/json" - ], + "description": "This endpoint retrieves the backup stats for the specified cluster.", "produces": [ "application/json" ], "tags": [ - "Proxies" + "ClusterRestic" ], - "summary": "Check if Proxy Needs Restart", + "summary": "Retrieve backup stats for a specific cluster", "parameters": [ { "type": "string", @@ -5933,20 +6163,16 @@ const docTemplate = `{ "name": "clusterName", "in": "path", "required": true - }, - { - "type": "string", - "description": "Proxy Name", - "name": "proxyName", - "in": "path", - "required": true } ], "responses": { "200": { - "description": "Need restart!", + "description": "List of backups", "schema": { - "type": "string" + "type": "array", + "items": { + "$ref": "#/definitions/backupmgr.BackupStat" + } } }, "403": { @@ -5960,29 +6186,20 @@ const docTemplate = `{ "schema": { "type": "string" } - }, - "503": { - "description": "No restart needed!\" \"Not a Valid Server!", - "schema": { - "type": "string" - } } } } }, - "/api/clusters/{clusterName}/proxies/{proxyName}/actions/provision": { - "post": { - "description": "Provision the proxy service 
for a given cluster and proxy", - "consumes": [ - "application/json" - ], + "/api/clusters/{clusterName}/restic/task-queue": { + "get": { + "description": "Gets the restic task queue for the specified cluster.", "produces": [ "application/json" ], "tags": [ - "Proxies" + "ClusterRestic" ], - "summary": "Provision Proxy Service", + "summary": "Get Restic Task Queue", "parameters": [ { "type": "string", @@ -5998,20 +6215,16 @@ const docTemplate = `{ "name": "clusterName", "in": "path", "required": true - }, - { - "type": "string", - "description": "Proxy Name", - "name": "proxyName", - "in": "path", - "required": true } ], "responses": { "200": { - "description": "Proxy Service Provisioned", + "description": "Task queue fetched", "schema": { - "type": "string" + "type": "array", + "items": { + "$ref": "#/definitions/backupmgr.ResticTask" + } } }, "403": { @@ -6021,7 +6234,7 @@ const docTemplate = `{ } }, "500": { - "description": "Cluster Not Found\" \"Server Not Found", + "description": "No cluster", "schema": { "type": "string" } @@ -6029,19 +6242,16 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/proxies/{proxyName}/actions/staging/{isStaging}": { + "/api/clusters/{clusterName}/restic/task-queue/cancel/{taskID}": { "post": { - "description": "Set the proxy service for a given cluster and proxy to staging", - "consumes": [ - "application/json" - ], + "description": "Cancel the specified restic task for the specified cluster.", "produces": [ "application/json" ], "tags": [ - "Proxies" + "ClusterRestic" ], - "summary": "Set Staging", + "summary": "Cancel Restic Task", "parameters": [ { "type": "string", @@ -6060,22 +6270,15 @@ const docTemplate = `{ }, { "type": "string", - "description": "Proxy Name", - "name": "proxyName", - "in": "path", - "required": true - }, - { - "type": "string", - "description": "Is Staging", - "name": "isStaging", + "description": "Task ID", + "name": "taskID", "in": "path", "required": true } ], "responses": { "200": { - "description": "Proxy Service Set to Staging", + "description": "Task cancelled", "schema": { "type": "string" } @@ -6087,13 +6290,7 @@ const docTemplate = `{ } }, "500": { - "description": "Cluster Not Found\" \"Server Not Found", - "schema": { - "type": "string" - } - }, - "503": { - "description": "Not a Valid Server!", + "description": "No cluster", "schema": { "type": "string" } @@ -6101,19 +6298,16 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/proxies/{proxyName}/actions/start": { + "/api/clusters/{clusterName}/restic/task-queue/move/{moveType}/{taskID}": { "post": { - "description": "Start the proxy service for a given cluster and proxy", - "consumes": [ - "application/json" - ], + "description": "Modify the restic task queue for the specified cluster.", "produces": [ "application/json" ], "tags": [ - "Proxies" + "ClusterRestic" ], - "summary": "Start Proxy Service", + "summary": "Modify Restic Task Queue", "parameters": [ { "type": "string", @@ -6131,16 +6325,28 @@ const docTemplate = `{ "required": true }, { + "enum": [ + "first", + "after", + "last" + ], "type": "string", - "description": "Proxy Name", - "name": "proxyName", + "description": "Move Type", + "name": "moveType", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Task ID", + "name": "taskID", "in": "path", "required": true } ], "responses": { "200": { - "description": "Proxy Service Started", + "description": "Task queue modified", "schema": { "type": "string" } @@ -6152,7 +6358,7 @@ const docTemplate = `{ } }, 
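
The task-queue move endpoint documented here (and its {afterID} variant below) accepts three move types: first, after, and last. An illustrative sketch of those semantics over an ordered queue of task IDs; this is not the actual backupmgr implementation, which the diff does not show:

package main

import "fmt"

// moveTask sketches the documented semantics: remove taskID from the queue,
// then reinsert it first, immediately after afterID, or last.
func moveTask(queue []int64, taskID int64, moveType string, afterID int64) []int64 {
	out := make([]int64, 0, len(queue))
	for _, id := range queue {
		if id != taskID {
			out = append(out, id)
		}
	}
	switch moveType {
	case "first":
		return append([]int64{taskID}, out...)
	case "after":
		for i, id := range out {
			if id == afterID {
				res := append([]int64{}, out[:i+1]...)
				res = append(res, taskID)
				return append(res, out[i+1:]...)
			}
		}
		return append(out, taskID) // afterID absent: fall back to last
	default: // "last"
		return append(out, taskID)
	}
}

func main() {
	q := []int64{1, 2, 3, 4}
	fmt.Println(moveTask(q, 4, "after", 1)) // [1 4 2 3]
}
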
"500": { - "description": "Cluster Not Found\" \"Server Not Found", + "description": "No cluster", "schema": { "type": "string" } @@ -6160,19 +6366,16 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/proxies/{proxyName}/actions/stop": { + "/api/clusters/{clusterName}/restic/task-queue/move/{moveType}/{taskID}/{afterID}": { "post": { - "description": "Stop the proxy service for a given cluster and proxy", - "consumes": [ - "application/json" - ], + "description": "Modify the restic task queue for the specified cluster.", "produces": [ "application/json" ], "tags": [ - "Proxies" + "ClusterRestic" ], - "summary": "Stop Proxy Service", + "summary": "Modify Restic Task Queue", "parameters": [ { "type": "string", @@ -6190,16 +6393,34 @@ const docTemplate = `{ "required": true }, { + "enum": [ + "first", + "after", + "last" + ], "type": "string", - "description": "Proxy Name", - "name": "proxyName", + "description": "Move Type", + "name": "moveType", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Task ID", + "name": "taskID", "in": "path", "required": true + }, + { + "type": "string", + "description": "After ID", + "name": "afterID", + "in": "path" } ], "responses": { "200": { - "description": "Proxy Service Stopped", + "description": "Task queue modified", "schema": { "type": "string" } @@ -6211,7 +6432,7 @@ const docTemplate = `{ } }, "500": { - "description": "Cluster Not Found\" \"Server Not Found", + "description": "No cluster", "schema": { "type": "string" } @@ -6219,19 +6440,16 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/proxies/{proxyName}/actions/unprovision": { - "post": { - "description": "Unprovision the proxy service for a given cluster and proxy", - "consumes": [ - "application/json" - ], + "/api/clusters/{clusterName}/restic/task-queue/reset": { + "get": { + "description": "Empty the restic task queue for the specified cluster.", "produces": [ "application/json" ], "tags": [ - "Proxies" + "ClusterRestic" ], - "summary": "Unprovision Proxy Service", + "summary": "Reset Restic Task Queue", "parameters": [ { "type": "string", @@ -6247,18 +6465,11 @@ const docTemplate = `{ "name": "clusterName", "in": "path", "required": true - }, - { - "type": "string", - "description": "Proxy Name", - "name": "proxyName", - "in": "path", - "required": true } ], "responses": { "200": { - "description": "Proxy Service Unprovisioned", + "description": "Task queue reset", "schema": { "type": "string" } @@ -6270,7 +6481,7 @@ const docTemplate = `{ } }, "500": { - "description": "Cluster Not Found\" \"Server Not Found", + "description": "No cluster", "schema": { "type": "string" } @@ -6278,16 +6489,21 @@ const docTemplate = `{ } } }, - "/api/clusters/{clusterName}/queryrules": { - "get": { - "description": "This endpoint retrieves the query rules for the specified cluster.", + "/api/clusters/{clusterName}/restic/task-queue/resume": { + "post": { + "responses": {} + } + }, + "/api/clusters/{clusterName}/restic/unlock": { + "post": { + "description": "Unlocks the restic repository for the specified cluster.", "produces": [ "application/json" ], "tags": [ - "Cluster" + "ClusterRestic" ], - "summary": "Retrieve query rules for a specific cluster", + "summary": "Unlock Restic Repository", "parameters": [ { "type": "string", @@ -6307,13 +6523,9 @@ const docTemplate = `{ ], "responses": { "200": { - "description": "List of query rules", + "description": "Restic repository unlocked", "schema": { - "type": "array", - "items": { - "type": 
"object", - "additionalProperties": true - } + "type": "string" } }, "403": { @@ -17917,7 +18129,84 @@ const docTemplate = `{ } }, "definitions": { - "archiver.BackupStat": { + "backupmgr.BackupMetaMap": { + "type": "object" + }, + "backupmgr.BackupMetadata": { + "type": "object", + "properties": { + "backupMethod": { + "type": "integer" + }, + "backupStrategy": { + "type": "integer" + }, + "backupTool": { + "type": "string" + }, + "backupToolVersion": { + "type": "string" + }, + "binLogFileName": { + "type": "string" + }, + "binLogFilePos": { + "type": "integer" + }, + "binLogUuid": { + "type": "string" + }, + "checksum": { + "type": "string" + }, + "completed": { + "type": "boolean" + }, + "compressed": { + "type": "boolean" + }, + "dest": { + "type": "string" + }, + "encrypted": { + "type": "boolean" + }, + "encryptionAlgo": { + "type": "string" + }, + "encryptionKey": { + "type": "string" + }, + "endTime": { + "type": "string" + }, + "fileCount": { + "type": "integer" + }, + "id": { + "type": "integer" + }, + "previous": { + "type": "integer" + }, + "retentionDays": { + "type": "integer" + }, + "size": { + "type": "integer" + }, + "source": { + "type": "string" + }, + "splitUser": { + "type": "boolean" + }, + "startTime": { + "type": "string" + } + } + }, + "backupmgr.BackupStat": { "type": "object", "properties": { "total_blob_count": { @@ -17931,61 +18220,77 @@ const docTemplate = `{ } } }, - "archiver.ResticPurgeOption": { + "backupmgr.PointInTimeMeta": { + "type": "object", + "properties": { + "backup": { + "type": "integer", + "format": "int64" + }, + "isInPITR": { + "type": "boolean" + }, + "restoreTime": { + "type": "integer", + "format": "int64" + }, + "useBinlog": { + "type": "boolean" + } + } + }, + "backupmgr.ResticPurgeOption": { "type": "object", "properties": { - "keepDaily": { + "keep_daily": { "type": "integer" }, - "keepHourly": { + "keep_hourly": { "type": "integer" }, - "keepLast": { + "keep_last": { "type": "integer" }, - "keepMonthly": { + "keep_monthly": { "type": "integer" }, - "keepWeekly": { + "keep_weekly": { "type": "integer" }, - "keepWithin": { + "keep_within": { "type": "string" }, - "keepWithinDaily": { + "keep_within_daily": { "type": "string" }, - "keepWithinHourly": { + "keep_within_hourly": { "type": "string" }, - "keepWithinMonthly": { + "keep_within_monthly": { "type": "string" }, - "keepWithinWeekly": { + "keep_within_weekly": { "type": "string" }, - "keepWithinYearly": { + "keep_within_yearly": { "type": "string" }, - "keepYearly": { + "keep_yearly": { "type": "integer" + }, + "snapshot_id": { + "type": "string" } } }, - "archiver.ResticTask": { + "backupmgr.ResticTask": { "type": "object", "properties": { "dir_path": { "type": "string" }, - "error_state": { - "$ref": "#/definitions/github_com_signal18_replication-manager_utils_state.State" - }, - "new_pass_file": { - "type": "string" - }, "opt": { - "$ref": "#/definitions/archiver.ResticPurgeOption" + "$ref": "#/definitions/backupmgr.ResticPurgeOption" }, "tags": { "type": "array", @@ -17997,23 +18302,27 @@ const docTemplate = `{ "type": "integer" }, "task_type": { - "$ref": "#/definitions/archiver.TaskType" + "$ref": "#/definitions/backupmgr.TaskType" } } }, - "archiver.TaskType": { + "backupmgr.TaskType": { "type": "integer", "enum": [ 0, 1, 2, - 3 + 3, + 4, + 5 ], "x-enum-varnames": [ + "InitTask", "FetchTask", - "PurgeTask", "BackupTask", - "UnlockTask" + "PurgeTask", + "UnlockTask", + "ChangePassTask" ] }, "cluster.APIUser": { @@ -18243,7 +18552,7 @@ const docTemplate = `{ } }, "backupList": 
{ - "$ref": "#/definitions/config.BackupMetaMap" + "$ref": "#/definitions/backupmgr.BackupMetaMap" }, "backupStat": { "$ref": "#/definitions/repmanv3.BackupStat" @@ -18794,10 +19103,10 @@ const docTemplate = `{ "type": "object", "properties": { "logical": { - "$ref": "#/definitions/config.BackupMetadata" + "$ref": "#/definitions/backupmgr.BackupMetadata" }, "physical": { - "$ref": "#/definitions/config.BackupMetadata" + "$ref": "#/definitions/backupmgr.BackupMetadata" } } }, @@ -19196,7 +19505,7 @@ const docTemplate = `{ "$ref": "#/definitions/config.StringsMap" }, "pointInTimeMeta": { - "$ref": "#/definitions/config.PointInTimeMeta" + "$ref": "#/definitions/backupmgr.PointInTimeMeta" }, "port": { "type": "string" @@ -19495,80 +19804,6 @@ const docTemplate = `{ } } }, - "config.BackupMetaMap": { - "type": "object" - }, - "config.BackupMetadata": { - "type": "object", - "properties": { - "backupMethod": { - "type": "integer" - }, - "backupStrategy": { - "type": "integer" - }, - "backupTool": { - "type": "string" - }, - "backupToolVersion": { - "type": "string" - }, - "binLogFileName": { - "type": "string" - }, - "binLogFilePos": { - "type": "integer" - }, - "binLogUuid": { - "type": "string" - }, - "checksum": { - "type": "string" - }, - "completed": { - "type": "boolean" - }, - "compressed": { - "type": "boolean" - }, - "dest": { - "type": "string" - }, - "encrypted": { - "type": "boolean" - }, - "encryptionAlgo": { - "type": "string" - }, - "encryptionKey": { - "type": "string" - }, - "endTime": { - "type": "string" - }, - "id": { - "type": "integer" - }, - "previous": { - "type": "integer" - }, - "retentionDays": { - "type": "integer" - }, - "size": { - "type": "integer" - }, - "source": { - "type": "string" - }, - "splitUser": { - "type": "boolean" - }, - "startTime": { - "type": "string" - } - } - }, "config.ConfigVariableType": { "type": "object", "properties": { @@ -19751,25 +19986,6 @@ const docTemplate = `{ } } }, - "config.PointInTimeMeta": { - "type": "object", - "properties": { - "backup": { - "type": "integer", - "format": "int64" - }, - "isInPITR": { - "type": "boolean" - }, - "restoreTime": { - "type": "integer", - "format": "int64" - }, - "useBinlog": { - "type": "boolean" - } - } - }, "config.Role": { "type": "object", "properties": { @@ -20688,6 +20904,12 @@ const docTemplate = `{ "backupResticRepository": { "type": "string" }, + "backupResticRunQueueOnStartup": { + "type": "boolean" + }, + "backupResticSaveQueueOnShutdown": { + "type": "boolean" + }, "backupResticTimeout": { "type": "integer" }, @@ -21325,9 +21547,6 @@ const docTemplate = `{ "logAppLevel": { "type": "integer" }, - "logArchiveLevel": { - "type": "integer" - }, "logBackupStream": { "type": "boolean" }, @@ -21406,6 +21625,9 @@ const docTemplate = `{ "logProxyLevel": { "type": "integer" }, + "logResticLevel": { + "type": "integer" + }, "logRotateMaxAge": { "type": "integer" }, @@ -22712,26 +22934,6 @@ const docTemplate = `{ } } }, - "github_com_signal18_replication-manager_utils_state.State": { - "type": "object", - "properties": { - "errDesc": { - "type": "string" - }, - "errFrom": { - "type": "string" - }, - "errKey": { - "type": "string" - }, - "errType": { - "type": "string" - }, - "serverUrl": { - "type": "string" - } - } - }, "github_com_signal18_replication-manager_utils_version.Version": { "type": "object", "properties": { @@ -23426,6 +23628,9 @@ const docTemplate = `{ "level": { "type": "string" }, + "module": { + "type": "integer" + }, "text": { "type": "string" }, diff --git a/docs/swagger.json 
b/docs/swagger.json index 9e8627259..7669702a8 100644 --- a/docs/swagger.json +++ b/docs/swagger.json @@ -4230,7 +4230,7 @@ } } }, - "/api/clusters/{clusterName}/archives": { + "/api/clusters/{clusterName}/backups": { "get": { "description": "This endpoint retrieves the backups for the specified cluster.", "produces": [ @@ -4264,7 +4264,9 @@ "type": "array", "items": { "type": "object", - "additionalProperties": true + "additionalProperties": { + "$ref": "#/definitions/backupmgr.BackupMetadata" + } } } }, @@ -4283,16 +4285,16 @@ } } }, - "/api/clusters/{clusterName}/archives/fetch": { - "post": { - "description": "Fetches the restic backup for the specified cluster.", + "/api/clusters/{clusterName}/backups/stats": { + "get": { + "description": "This endpoint retrieves the backup stats for the specified cluster.", "produces": [ "application/json" ], "tags": [ "ClusterBackups" ], - "summary": "Fetch Archives", + "summary": "Retrieve backup stats for a specific cluster", "parameters": [ { "type": "string", @@ -4312,9 +4314,12 @@ ], "responses": { "200": { - "description": "Archives fetch queued", + "description": "List of backups", "schema": { - "type": "string" + "type": "array", + "items": { + "$ref": "#/definitions/backupmgr.BackupStat" + } } }, "403": { @@ -4332,16 +4337,16 @@ } } }, - "/api/clusters/{clusterName}/archives/init": { - "post": { - "description": "Inits the restic backup for the specified cluster.", + "/api/clusters/{clusterName}/certificates": { + "get": { + "description": "This endpoint retrieves the client certificates for the specified cluster.", "produces": [ "application/json" ], "tags": [ - "ClusterBackups" + "ClusterCertificates" ], - "summary": "Init Restic Backup", + "summary": "Retrieve client certificates for a specific cluster", "parameters": [ { "type": "string", @@ -4361,19 +4366,17 @@ ], "responses": { "200": { - "description": "Archives purge queued", - "schema": { - "type": "string" - } - }, - "403": { - "description": "No valid ACL", + "description": "List of client certificates", "schema": { - "type": "string" + "type": "array", + "items": { + "type": "object", + "additionalProperties": true + } } }, "500": { - "description": "No cluster", + "description": "Internal Server Error", "schema": { "type": "string" } @@ -4381,16 +4384,16 @@ } } }, - "/api/clusters/{clusterName}/archives/init/{force}": { - "post": { - "description": "Inits the restic backup for the specified cluster.", + "/api/clusters/{clusterName}/diffvariables": { + "get": { + "description": "This endpoint retrieves the variable differences for the specified cluster.", "produces": [ "application/json" ], "tags": [ - "ClusterBackups" + "Cluster" ], - "summary": "Init Restic Backup", + "summary": "Retrieve variable differences for a specific cluster", "parameters": [ { "type": "string", @@ -4406,22 +4409,16 @@ "name": "clusterName", "in": "path", "required": true - }, - { - "enum": [ - "force" - ], - "type": "string", - "description": "Force init", - "name": "force", - "in": "path" } ], "responses": { "200": { - "description": "Archives purge queued", + "description": "List of variable differences", "schema": { - "type": "string" + "type": "array", + "items": { + "$ref": "#/definitions/cluster.VariableDiff" + } } }, "403": { @@ -4439,16 +4436,19 @@ } } }, - "/api/clusters/{clusterName}/archives/purge": { + "/api/clusters/{clusterName}/docker/actions/registry-connect": { "post": { - "description": "Purges the restic backup for the specified cluster.", + "description": "Logs in to a Docker 
registry using the provided credentials.", + "consumes": [ + "application/json" + ], "produces": [ "application/json" ], "tags": [ - "ClusterBackups" + "Docker" ], - "summary": "Purge Restic Backup", + "summary": "Docker Registry Login", "parameters": [ { "type": "string", @@ -4464,11 +4464,26 @@ "name": "clusterName", "in": "path", "required": true + }, + { + "description": "Docker Registry Login Form", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/server.DockerRegistryLoginForm" + } } ], "responses": { "200": { - "description": "Archives purge queued", + "description": "Docker registry login successful", + "schema": { + "type": "string" + } + }, + "400": { + "description": "Error decoding request body", "schema": { "type": "string" } @@ -4480,7 +4495,7 @@ } }, "500": { - "description": "No cluster", + "description": "Error creating request\" or \"Error making request to Docker registry\" or \"Docker registry login failed", "schema": { "type": "string" } @@ -4488,16 +4503,19 @@ } } }, - "/api/clusters/{clusterName}/archives/restore-config/{force}": { - "post": { - "description": "Restores the restic config for the specified cluster.", + "/api/clusters/{clusterName}/docker/browse/{imageRef}": { + "get": { + "description": "Lists files in a specified directory of a Docker image.", + "consumes": [ + "application/json" + ], "produces": [ "application/json" ], "tags": [ - "ClusterBackups" + "Docker" ], - "summary": "Restore Restic Config", + "summary": "List Files in Docker Image Directory", "parameters": [ { "type": "string", @@ -4516,16 +4534,21 @@ }, { "type": "string", - "default": "noforce", - "description": "Force Restore", - "name": "force", + "description": "Docker Image Reference", + "name": "imageRef", "in": "path", "required": true } ], "responses": { "200": { - "description": "Archives restore config done", + "description": "List of files in the directory", + "schema": { + "$ref": "#/definitions/treehelper.FileTreeCache" + } + }, + "400": { + "description": "Image reference or source directory not provided", "schema": { "type": "string" } @@ -4537,7 +4560,7 @@ } }, "500": { - "description": "No cluster", + "description": "Error listing files in image directory\" or \"Error encoding JSON", "schema": { "type": "string" } @@ -4545,16 +4568,19 @@ } } }, - "/api/clusters/{clusterName}/archives/stats": { - "get": { - "description": "This endpoint retrieves the backup stats for the specified cluster.", + "/api/clusters/{clusterName}/ext-role/accept": { + "post": { + "description": "This endpoint accepts external operations for the specified cluster.", + "consumes": [ + "application/json" + ], "produces": [ "application/json" ], "tags": [ - "ClusterBackups" + "Cloud18" ], - "summary": "Retrieve backup stats for a specific cluster", + "summary": "Accept external operations for a specific cluster", "parameters": [ { "type": "string", @@ -4570,16 +4596,22 @@ "name": "clusterName", "in": "path", "required": true + }, + { + "description": "User Form", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/server.CloudUserForm" + } } ], "responses": { "200": { - "description": "List of backups", + "description": "Email sent to sponsor!", "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/archiver.BackupStat" - } + "type": "string" } }, "403": { @@ -4589,7 +4621,7 @@ } }, "500": { - "description": "No cluster", + "description": "Error accepting subscription", "schema": { "type": "string" } @@ 
-4597,16 +4629,19 @@ } } }, - "/api/clusters/{clusterName}/archives/task-queue": { - "get": { - "description": "Gets the restic task queue for the specified cluster.", + "/api/clusters/{clusterName}/ext-role/quote": { + "post": { + "description": "This endpoint quotes external operations for the specified cluster.", + "consumes": [ + "application/json" + ], "produces": [ "application/json" ], "tags": [ - "ClusterBackups" + "Cloud18" ], - "summary": "Get Archives Task Queue", + "summary": "Quote external operations for a specific cluster", "parameters": [ { "type": "string", @@ -4622,16 +4657,22 @@ "name": "clusterName", "in": "path", "required": true + }, + { + "description": "User Form", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/server.CloudUserForm" + } } ], "responses": { "200": { - "description": "Task queue fetched", + "description": "Email sent to sponsor!", "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/archiver.ResticTask" - } + "type": "string" } }, "403": { @@ -4641,7 +4682,7 @@ } }, "500": { - "description": "No cluster", + "description": "Error accepting external operations", "schema": { "type": "string" } @@ -4649,16 +4690,19 @@ } } }, - "/api/clusters/{clusterName}/archives/task-queue/reset": { - "get": { - "description": "Empty the restic task queue for the specified cluster.", + "/api/clusters/{clusterName}/ext-role/refuse": { + "post": { + "description": "This endpoint rejects external operations for the specified cluster.", + "consumes": [ + "application/json" + ], "produces": [ "application/json" ], "tags": [ - "ClusterBackups" + "Cloud18" ], - "summary": "Reset Archives Task Queue", + "summary": "Reject external operations for a specific cluster", "parameters": [ { "type": "string", @@ -4674,11 +4718,20 @@ "name": "clusterName", "in": "path", "required": true + }, + { + "description": "User Form", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/server.CloudUserForm" + } } ], "responses": { "200": { - "description": "Task queue reset", + "description": "Subscription removed!", "schema": { "type": "string" } @@ -4690,7 +4743,7 @@ } }, "500": { - "description": "No cluster", + "description": "Error removing subscription", "schema": { "type": "string" } @@ -4698,16 +4751,19 @@ } } }, - "/api/clusters/{clusterName}/archives/unlock": { + "/api/clusters/{clusterName}/ext-role/subscribe": { "post": { - "description": "Unlocks the restic backup for the specified cluster.", + "description": "This endpoint subscribes external operations for the specified cluster.", + "consumes": [ + "application/json" + ], "produces": [ "application/json" ], "tags": [ - "ClusterBackups" + "Cloud18" ], - "summary": "Unlock Restic Backup", + "summary": "subscribe external operations for a specific cluster", "parameters": [ { "type": "string", @@ -4723,11 +4779,20 @@ "name": "clusterName", "in": "path", "required": true + }, + { + "description": "User Form", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/server.CloudUserForm" + } } ], "responses": { "200": { - "description": "Archives purge queued", + "description": "Email sent to sponsor!", "schema": { "type": "string" } @@ -4739,7 +4804,7 @@ } }, "500": { - "description": "No cluster", + "description": "Error subscribing external operations", "schema": { "type": "string" } @@ -4747,16 +4812,16 @@ } } }, - "/api/clusters/{clusterName}/backups": { + 
"/api/clusters/{clusterName}/graphite-filterlist": { "get": { - "description": "This endpoint retrieves the backups for the specified cluster.", + "description": "This endpoint retrieves the Graphite filter list for the specified cluster.", "produces": [ "application/json" ], "tags": [ - "ClusterBackups" + "ClusterGraphite" ], - "summary": "Retrieve backups for a specific cluster", + "summary": "Retrieve Graphite filter list for a specific cluster", "parameters": [ { "type": "string", @@ -4776,23 +4841,16 @@ ], "responses": { "200": { - "description": "List of backups", + "description": "List of Graphite filters", "schema": { "type": "array", "items": { - "type": "object", - "additionalProperties": true + "type": "string" } } }, - "403": { - "description": "No valid ACL", - "schema": { - "type": "string" - } - }, "500": { - "description": "No cluster", + "description": "Internal Server Error", "schema": { "type": "string" } @@ -4800,16 +4858,16 @@ } } }, - "/api/clusters/{clusterName}/backups/stats": { + "/api/clusters/{clusterName}/health": { "get": { - "description": "This endpoint retrieves the backup stats for the specified cluster.", + "description": "Get the health status of the specified cluster.", "produces": [ "application/json" ], "tags": [ - "ClusterBackups" + "ClusterHealth" ], - "summary": "Retrieve backup stats for a specific cluster", + "summary": "Get Cluster Health", "parameters": [ { "type": "string", @@ -4829,12 +4887,9 @@ ], "responses": { "200": { - "description": "List of backups", + "description": "Cluster health fetched", "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/archiver.BackupStat" - } + "$ref": "#/definitions/peer.PeerHealth" } }, "403": { @@ -4852,16 +4907,16 @@ } } }, - "/api/clusters/{clusterName}/certificates": { + "/api/clusters/{clusterName}/is-in-errstate/{errstate}": { "get": { - "description": "This endpoint retrieves the client certificates for the specified cluster.", + "description": "Checks if the specified cluster is in an error state.", "produces": [ "application/json" ], "tags": [ - "ClusterCertificates" + "ClusterHealth" ], - "summary": "Retrieve client certificates for a specific cluster", + "summary": "Check if Cluster is in Error State", "parameters": [ { "type": "string", @@ -4877,21 +4932,30 @@ "name": "clusterName", "in": "path", "required": true - } + }, + { + "type": "string", + "description": "State to check", + "name": "state", + "in": "path", + "required": true + } ], "responses": { "200": { - "description": "List of client certificates", + "description": "true\" or \"false", "schema": { - "type": "array", - "items": { - "type": "object", - "additionalProperties": true - } + "type": "string" + } + }, + "403": { + "description": "No valid ACL", + "schema": { + "type": "string" } }, "500": { - "description": "Internal Server Error", + "description": "No cluster", "schema": { "type": "string" } @@ -4899,16 +4963,16 @@ } } }, - "/api/clusters/{clusterName}/diffvariables": { + "/api/clusters/{clusterName}/jobs": { "get": { - "description": "This endpoint retrieves the variable differences for the specified cluster.", + "description": "This endpoint retrieves the job entries for the specified cluster.", "produces": [ "application/json" ], "tags": [ "Cluster" ], - "summary": "Retrieve variable differences for a specific cluster", + "summary": "Retrieve job entries for a specific cluster", "parameters": [ { "type": "string", @@ -4928,11 +4992,12 @@ ], "responses": { "200": { - "description": "List of variable 
differences", + "description": "List of job entries", "schema": { "type": "array", "items": { - "$ref": "#/definitions/cluster.VariableDiff" + "type": "object", + "additionalProperties": true } } }, @@ -4943,7 +5008,7 @@ } }, "500": { - "description": "No cluster", + "description": "Cluster Not Found", "schema": { "type": "string" } @@ -4951,66 +5016,118 @@ } } }, - "/api/clusters/{clusterName}/docker/actions/registry-connect": { - "post": { - "description": "Logs in to a Docker registry using the provided credentials.", - "consumes": [ - "application/json" - ], + "/api/clusters/{clusterName}/jobs-log-level/{task}/{level}": { + "get": { + "description": "Checks if a specific log level is enabled for a given task in the specified cluster.", "produces": [ "application/json" ], "tags": [ - "Docker" + "ClusterLogging" ], - "summary": "Docker Registry Login", + "summary": "Check Cluster Log Level", "parameters": [ { "type": "string", - "default": "Bearer \u003cAdd access token here\u003e", - "description": "Insert your access token", - "name": "Authorization", - "in": "header", + "description": "Cluster Name", + "name": "clusterName", + "in": "path", "required": true }, { + "enum": [ + "xtrabackup", + "mariabackup", + "errorlog", + "slowquery", + "sqlerrorlog", + "auditlog", + "zfssnapback", + "optimize", + "reseedxtrabackup", + "reseedmariabackup", + "reseedmysqldump", + "flashbackxtrabackup", + "flashbackmariadbackup", + "flashbackmysqldump", + "stop", + "restart", + "start", + "printdefault-current", + "printdefault-dummy", + "jobs-check", + "jobs-upgrade" + ], "type": "string", - "description": "Cluster Name", - "name": "clusterName", + "description": "Task Name", + "name": "task", "in": "path", "required": true }, { - "description": "Docker Registry Login Form", - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/server.DockerRegistryLoginForm" - } + "enum": [ + "ERROR", + "WARN", + "INFO", + "DEBUG" + ], + "type": "string", + "description": "Log Level", + "name": "level", + "in": "path", + "required": true } ], "responses": { "200": { - "description": "Docker registry login successful", + "description": "true\" or \"false", "schema": { "type": "string" } }, - "400": { - "description": "Error decoding request body", + "403": { + "description": "No valid ACL", "schema": { "type": "string" } }, - "403": { - "description": "No valid ACL", + "500": { + "description": "No cluster", + "schema": { + "type": "string" + } + } + } + } + }, + "/api/clusters/{clusterName}/need-rolling-reprov": { + "get": { + "description": "Checks if a specified cluster needs a rolling reprovision.", + "produces": [ + "text/plain" + ], + "tags": [ + "Database" + ], + "summary": "Check if a cluster needs a rolling reprovision", + "parameters": [ + { + "type": "string", + "description": "Cluster Name", + "name": "clusterName", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "200 -Need rolling reprov!", "schema": { "type": "string" } }, "500": { - "description": "Error creating request\" or \"Error making request to Docker registry\" or \"Docker registry login failed", + "description": "503 -No rolling reprov needed!\" or \"500 -No cluster", "schema": { "type": "string" } @@ -5018,19 +5135,51 @@ } } }, - "/api/clusters/{clusterName}/docker/browse/{imageRef}": { + "/api/clusters/{clusterName}/need-rolling-restart": { "get": { - "description": "Lists files in a specified directory of a Docker image.", - "consumes": [ - "application/json" + 
"description": "Checks if a specified cluster needs a rolling restart.", + "produces": [ + "text/plain" + ], + "tags": [ + "Database" + ], + "summary": "Check if a cluster needs a rolling restart", + "parameters": [ + { + "type": "string", + "description": "Cluster Name", + "name": "clusterName", + "in": "path", + "required": true + } ], + "responses": { + "200": { + "description": "200 -Need rolling restart!", + "schema": { + "type": "string" + } + }, + "500": { + "description": "503 -No rolling restart needed!\" or \"500 -No cluster", + "schema": { + "type": "string" + } + } + } + } + }, + "/api/clusters/{clusterName}/opensvc-gateway": { + "get": { + "description": "Retrieves the gateway nodes of the specified cluster.", "produces": [ "application/json" ], "tags": [ - "Docker" + "ClusterGateway" ], - "summary": "List Files in Docker Image Directory", + "summary": "Get Cluster Gateway Nodes", "parameters": [ { "type": "string", @@ -5046,26 +5195,16 @@ "name": "clusterName", "in": "path", "required": true - }, - { - "type": "string", - "description": "Docker Image Reference", - "name": "imageRef", - "in": "path", - "required": true } ], "responses": { "200": { - "description": "List of files in the directory", - "schema": { - "$ref": "#/definitions/treehelper.FileTreeCache" - } - }, - "400": { - "description": "Image reference or source directory not provided", + "description": "List of gateway nodes", "schema": { - "type": "string" + "type": "array", + "items": { + "type": "string" + } } }, "403": { @@ -5075,7 +5214,7 @@ } }, "500": { - "description": "Error listing files in image directory\" or \"Error encoding JSON", + "description": "No cluster\" or \"Error getting gateway nodes", "schema": { "type": "string" } @@ -5083,19 +5222,16 @@ } } }, - "/api/clusters/{clusterName}/ext-role/accept": { - "post": { - "description": "This endpoint accepts external operations for the specified cluster.", - "consumes": [ - "application/json" - ], + "/api/clusters/{clusterName}/opensvc-stats": { + "get": { + "description": "Retrieves the OpenSVC daemon status of the specified cluster.", "produces": [ "application/json" ], "tags": [ - "Cloud18" + "ClusterGateway" ], - "summary": "Accept external operations for a specific cluster", + "summary": "Get OpenSVC Daemon Status", "parameters": [ { "type": "string", @@ -5111,22 +5247,16 @@ "name": "clusterName", "in": "path", "required": true - }, - { - "description": "User Form", - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/server.CloudUserForm" - } } ], "responses": { "200": { - "description": "Email sent to sponsor!", + "description": "OpenSVC daemon status fetched", "schema": { - "type": "string" + "type": "array", + "items": { + "$ref": "#/definitions/opensvc.DaemonNodeStats" + } } }, "403": { @@ -5136,7 +5266,7 @@ } }, "500": { - "description": "Error accepting subscription", + "description": "No cluster\" or \"Error getting OpenSVC stats", "schema": { "type": "string" } @@ -5144,9 +5274,49 @@ } } }, - "/api/clusters/{clusterName}/ext-role/quote": { - "post": { - "description": "This endpoint quotes external operations for the specified cluster.", + "/api/clusters/{clusterName}/proxies/{proxyName}": { + "get": { + "description": "Shows the proxies for that specific named cluster", + "tags": [ + "Proxies" + ], + "summary": "Shows the proxies for that specific named cluster", + "parameters": [ + { + "type": "string", + "default": "Bearer \u003cAdd access token here\u003e", + "description": "Insert your access 
token", + "name": "Authorization", + "in": "header", + "required": true + }, + { + "type": "string", + "description": "Cluster Name", + "name": "clusterName", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Server details retrieved successfully", + "schema": { + "$ref": "#/definitions/cluster.Proxy" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "type": "string" + } + } + } + } + }, + "/api/clusters/{clusterName}/proxies/{proxyName}/actions/need-reprov": { + "get": { + "description": "Check if the proxy service for a given cluster and proxy needs reprovisioning", "consumes": [ "application/json" ], @@ -5154,9 +5324,9 @@ "application/json" ], "tags": [ - "Cloud18" + "Proxies" ], - "summary": "Quote external operations for a specific cluster", + "summary": "Check if Proxy Needs Reprovision", "parameters": [ { "type": "string", @@ -5174,18 +5344,16 @@ "required": true }, { - "description": "User Form", - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/server.CloudUserForm" - } + "type": "string", + "description": "Proxy Name", + "name": "proxyName", + "in": "path", + "required": true } ], "responses": { "200": { - "description": "Email sent to sponsor!", + "description": "Need reprov!", "schema": { "type": "string" } @@ -5197,17 +5365,23 @@ } }, "500": { - "description": "Error accepting external operations", + "description": "No cluster", + "schema": { + "type": "string" + } + }, + "503": { + "description": "No reprov needed!\" \"Not a Valid Server!", "schema": { "type": "string" } } } } - }, - "/api/clusters/{clusterName}/ext-role/refuse": { - "post": { - "description": "This endpoint rejects external operations for the specified cluster.", + }, + "/api/clusters/{clusterName}/proxies/{proxyName}/actions/need-restart": { + "get": { + "description": "Check if the proxy service for a given cluster and proxy needs a restart", "consumes": [ "application/json" ], @@ -5215,9 +5389,9 @@ "application/json" ], "tags": [ - "Cloud18" + "Proxies" ], - "summary": "Reject external operations for a specific cluster", + "summary": "Check if Proxy Needs Restart", "parameters": [ { "type": "string", @@ -5235,18 +5409,16 @@ "required": true }, { - "description": "User Form", - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/server.CloudUserForm" - } + "type": "string", + "description": "Proxy Name", + "name": "proxyName", + "in": "path", + "required": true } ], "responses": { "200": { - "description": "Subscription removed!", + "description": "Need restart!", "schema": { "type": "string" } @@ -5258,7 +5430,13 @@ } }, "500": { - "description": "Error removing subscription", + "description": "No cluster", + "schema": { + "type": "string" + } + }, + "503": { + "description": "No restart needed!\" \"Not a Valid Server!", "schema": { "type": "string" } @@ -5266,9 +5444,9 @@ } } }, - "/api/clusters/{clusterName}/ext-role/subscribe": { + "/api/clusters/{clusterName}/proxies/{proxyName}/actions/provision": { "post": { - "description": "This endpoint subscribes external operations for the specified cluster.", + "description": "Provision the proxy service for a given cluster and proxy", "consumes": [ "application/json" ], @@ -5276,9 +5454,9 @@ "application/json" ], "tags": [ - "Cloud18" + "Proxies" ], - "summary": "subscribe external operations for a specific cluster", + "summary": "Provision Proxy Service", "parameters": [ { "type": "string", @@ -5296,18 +5474,16 
@@ "required": true }, { - "description": "User Form", - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/server.CloudUserForm" - } + "type": "string", + "description": "Proxy Name", + "name": "proxyName", + "in": "path", + "required": true } ], "responses": { "200": { - "description": "Email sent to sponsor!", + "description": "Proxy Service Provisioned", "schema": { "type": "string" } @@ -5319,7 +5495,7 @@ } }, "500": { - "description": "Error subscribing external operations", + "description": "Cluster Not Found\" \"Server Not Found", "schema": { "type": "string" } @@ -5327,16 +5503,19 @@ } } }, - "/api/clusters/{clusterName}/graphite-filterlist": { - "get": { - "description": "This endpoint retrieves the Graphite filter list for the specified cluster.", + "/api/clusters/{clusterName}/proxies/{proxyName}/actions/staging/{isStaging}": { + "post": { + "description": "Set the proxy service for a given cluster and proxy to staging", + "consumes": [ + "application/json" + ], "produces": [ "application/json" ], "tags": [ - "ClusterGraphite" + "Proxies" ], - "summary": "Retrieve Graphite filter list for a specific cluster", + "summary": "Set Staging", "parameters": [ { "type": "string", @@ -5352,20 +5531,43 @@ "name": "clusterName", "in": "path", "required": true + }, + { + "type": "string", + "description": "Proxy Name", + "name": "proxyName", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Is Staging", + "name": "isStaging", + "in": "path", + "required": true } ], "responses": { "200": { - "description": "List of Graphite filters", + "description": "Proxy Service Set to Staging", "schema": { - "type": "array", - "items": { - "type": "string" - } + "type": "string" + } + }, + "403": { + "description": "No valid ACL", + "schema": { + "type": "string" } }, "500": { - "description": "Internal Server Error", + "description": "Cluster Not Found\" \"Server Not Found", + "schema": { + "type": "string" + } + }, + "503": { + "description": "Not a Valid Server!", "schema": { "type": "string" } @@ -5373,16 +5575,19 @@ } } }, - "/api/clusters/{clusterName}/health": { - "get": { - "description": "Get the health status of the specified cluster.", + "/api/clusters/{clusterName}/proxies/{proxyName}/actions/start": { + "post": { + "description": "Start the proxy service for a given cluster and proxy", + "consumes": [ + "application/json" + ], "produces": [ "application/json" ], "tags": [ - "ClusterHealth" + "Proxies" ], - "summary": "Get Cluster Health", + "summary": "Start Proxy Service", "parameters": [ { "type": "string", @@ -5398,13 +5603,20 @@ "name": "clusterName", "in": "path", "required": true + }, + { + "type": "string", + "description": "Proxy Name", + "name": "proxyName", + "in": "path", + "required": true } ], "responses": { "200": { - "description": "Cluster health fetched", + "description": "Proxy Service Started", "schema": { - "$ref": "#/definitions/peer.PeerHealth" + "type": "string" } }, "403": { @@ -5414,7 +5626,7 @@ } }, "500": { - "description": "No cluster", + "description": "Cluster Not Found\" \"Server Not Found", "schema": { "type": "string" } @@ -5422,16 +5634,19 @@ } } }, - "/api/clusters/{clusterName}/is-in-errstate/{errstate}": { - "get": { - "description": "Checks if the specified cluster is in an error state.", + "/api/clusters/{clusterName}/proxies/{proxyName}/actions/stop": { + "post": { + "description": "Stop the proxy service for a given cluster and proxy", + "consumes": [ + "application/json" + ], 
"produces": [ "application/json" ], "tags": [ - "ClusterHealth" + "Proxies" ], - "summary": "Check if Cluster is in Error State", + "summary": "Stop Proxy Service", "parameters": [ { "type": "string", @@ -5450,15 +5665,15 @@ }, { "type": "string", - "description": "State to check", - "name": "state", + "description": "Proxy Name", + "name": "proxyName", "in": "path", "required": true } ], "responses": { "200": { - "description": "true\" or \"false", + "description": "Proxy Service Stopped", "schema": { "type": "string" } @@ -5470,7 +5685,7 @@ } }, "500": { - "description": "No cluster", + "description": "Cluster Not Found\" \"Server Not Found", "schema": { "type": "string" } @@ -5478,16 +5693,19 @@ } } }, - "/api/clusters/{clusterName}/jobs": { - "get": { - "description": "This endpoint retrieves the job entries for the specified cluster.", + "/api/clusters/{clusterName}/proxies/{proxyName}/actions/unprovision": { + "post": { + "description": "Unprovision the proxy service for a given cluster and proxy", + "consumes": [ + "application/json" + ], "produces": [ "application/json" ], "tags": [ - "Cluster" + "Proxies" ], - "summary": "Retrieve job entries for a specific cluster", + "summary": "Unprovision Proxy Service", "parameters": [ { "type": "string", @@ -5503,17 +5721,20 @@ "name": "clusterName", "in": "path", "required": true + }, + { + "type": "string", + "description": "Proxy Name", + "name": "proxyName", + "in": "path", + "required": true } ], "responses": { "200": { - "description": "List of job entries", + "description": "Proxy Service Unprovisioned", "schema": { - "type": "array", - "items": { - "type": "object", - "additionalProperties": true - } + "type": "string" } }, "403": { @@ -5523,7 +5744,7 @@ } }, "500": { - "description": "Cluster Not Found", + "description": "Cluster Not Found\" \"Server Not Found", "schema": { "type": "string" } @@ -5531,73 +5752,42 @@ } } }, - "/api/clusters/{clusterName}/jobs-log-level/{task}/{level}": { + "/api/clusters/{clusterName}/queryrules": { "get": { - "description": "Checks if a specific log level is enabled for a given task in the specified cluster.", + "description": "This endpoint retrieves the query rules for the specified cluster.", "produces": [ "application/json" ], "tags": [ - "ClusterLogging" + "Cluster" ], - "summary": "Check Cluster Log Level", + "summary": "Retrieve query rules for a specific cluster", "parameters": [ { "type": "string", - "description": "Cluster Name", - "name": "clusterName", - "in": "path", - "required": true - }, - { - "enum": [ - "xtrabackup", - "mariabackup", - "errorlog", - "slowquery", - "sqlerrorlog", - "auditlog", - "zfssnapback", - "optimize", - "reseedxtrabackup", - "reseedmariabackup", - "reseedmysqldump", - "flashbackxtrabackup", - "flashbackmariadbackup", - "flashbackmysqldump", - "stop", - "restart", - "start", - "printdefault-current", - "printdefault-dummy", - "jobs-check", - "jobs-upgrade" - ], - "type": "string", - "description": "Task Name", - "name": "task", - "in": "path", + "default": "Bearer \u003cAdd access token here\u003e", + "description": "Insert your access token", + "name": "Authorization", + "in": "header", "required": true }, { - "enum": [ - "ERROR", - "WARN", - "INFO", - "DEBUG" - ], "type": "string", - "description": "Log Level", - "name": "level", + "description": "Cluster Name", + "name": "clusterName", "in": "path", "required": true } ], "responses": { "200": { - "description": "true\" or \"false", + "description": "List of query rules", "schema": { - "type": "string" + 
"type": "array", + "items": { + "type": "object", + "additionalProperties": true + } } }, "403": { @@ -5615,17 +5805,25 @@ } } }, - "/api/clusters/{clusterName}/need-rolling-reprov": { - "get": { - "description": "Checks if a specified cluster needs a rolling reprovision.", + "/api/clusters/{clusterName}/restic/fetch": { + "post": { + "description": "Fetches the restic backup for the specified cluster.", "produces": [ - "text/plain" + "application/json" ], "tags": [ - "Database" + "ClusterRestic" ], - "summary": "Check if a cluster needs a rolling reprovision", + "summary": "Fetch Restic Snapshots", "parameters": [ + { + "type": "string", + "default": "Bearer \u003cAdd access token here\u003e", + "description": "Insert your access token", + "name": "Authorization", + "in": "header", + "required": true + }, { "type": "string", "description": "Cluster Name", @@ -5636,13 +5834,19 @@ ], "responses": { "200": { - "description": "200 -Need rolling reprov!", + "description": "Restic snapshots fetch queued", + "schema": { + "type": "string" + } + }, + "403": { + "description": "No valid ACL", "schema": { "type": "string" } }, "500": { - "description": "503 -No rolling reprov needed!\" or \"500 -No cluster", + "description": "No cluster", "schema": { "type": "string" } @@ -5650,17 +5854,25 @@ } } }, - "/api/clusters/{clusterName}/need-rolling-restart": { - "get": { - "description": "Checks if a specified cluster needs a rolling restart.", + "/api/clusters/{clusterName}/restic/init": { + "post": { + "description": "Inits the restic repository for the specified cluster.", "produces": [ - "text/plain" + "application/json" ], "tags": [ - "Database" + "ClusterRestic" ], - "summary": "Check if a cluster needs a rolling restart", + "summary": "Init Restic Repository", "parameters": [ + { + "type": "string", + "default": "Bearer \u003cAdd access token here\u003e", + "description": "Insert your access token", + "name": "Authorization", + "in": "header", + "required": true + }, { "type": "string", "description": "Cluster Name", @@ -5671,13 +5883,19 @@ ], "responses": { "200": { - "description": "200 -Need rolling restart!", + "description": "Restic repository initialized", + "schema": { + "type": "string" + } + }, + "403": { + "description": "No valid ACL", "schema": { "type": "string" } }, "500": { - "description": "503 -No rolling restart needed!\" or \"500 -No cluster", + "description": "No cluster", "schema": { "type": "string" } @@ -5685,16 +5903,16 @@ } } }, - "/api/clusters/{clusterName}/opensvc-gateway": { - "get": { - "description": "Retrieves the gateway nodes of the specified cluster.", + "/api/clusters/{clusterName}/restic/init/{force}": { + "post": { + "description": "Inits the restic repository for the specified cluster.", "produces": [ "application/json" ], "tags": [ - "ClusterGateway" + "ClusterRestic" ], - "summary": "Get Cluster Gateway Nodes", + "summary": "Init Restic Repository", "parameters": [ { "type": "string", @@ -5710,16 +5928,22 @@ "name": "clusterName", "in": "path", "required": true + }, + { + "enum": [ + "force" + ], + "type": "string", + "description": "Force init", + "name": "force", + "in": "path" } ], "responses": { "200": { - "description": "List of gateway nodes", + "description": "Restic repository initialized", "schema": { - "type": "array", - "items": { - "type": "string" - } + "type": "string" } }, "403": { @@ -5729,7 +5953,7 @@ } }, "500": { - "description": "No cluster\" or \"Error getting gateway nodes", + "description": "No cluster", "schema": { "type": "string" } 
@@ -5737,16 +5961,16 @@ } } }, - "/api/clusters/{clusterName}/opensvc-stats": { - "get": { - "description": "Retrieves the OpenSVC daemon status of the specified cluster.", + "/api/clusters/{clusterName}/restic/purge/{snapshotID}": { + "post": { + "description": "Purges the restic backup for the specified cluster.", "produces": [ "application/json" ], "tags": [ - "ClusterGateway" + "ClusterRestic" ], - "summary": "Get OpenSVC Daemon Status", + "summary": "Purge Restic Backup", "parameters": [ { "type": "string", @@ -5762,16 +5986,20 @@ "name": "clusterName", "in": "path", "required": true + }, + { + "type": "string", + "description": "Snapshot ID", + "name": "snapshotID", + "in": "path", + "required": true } ], "responses": { "200": { - "description": "OpenSVC daemon status fetched", + "description": "Restic repository purged", "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/opensvc.DaemonNodeStats" - } + "type": "string" } }, "403": { @@ -5781,7 +6009,7 @@ } }, "500": { - "description": "No cluster\" or \"Error getting OpenSVC stats", + "description": "No cluster", "schema": { "type": "string" } @@ -5789,13 +6017,16 @@ } } }, - "/api/clusters/{clusterName}/proxies/{proxyName}": { - "get": { - "description": "Shows the proxies for that specific named cluster", + "/api/clusters/{clusterName}/restic/restore-config/{force}": { + "post": { + "description": "Restores the restic config for the specified cluster.", + "produces": [ + "application/json" + ], "tags": [ - "Proxies" + "ClusterRestic" ], - "summary": "Shows the proxies for that specific named cluster", + "summary": "Restore Restic Config", "parameters": [ { "type": "string", @@ -5811,17 +6042,31 @@ "name": "clusterName", "in": "path", "required": true + }, + { + "type": "string", + "default": "noforce", + "description": "Force Restore", + "name": "force", + "in": "path", + "required": true } ], "responses": { "200": { - "description": "Server details retrieved successfully", + "description": "Restic config restore done", "schema": { - "$ref": "#/definitions/cluster.Proxy" + "type": "string" + } + }, + "403": { + "description": "No valid ACL", + "schema": { + "type": "string" } }, "500": { - "description": "Internal Server Error", + "description": "No cluster", "schema": { "type": "string" } @@ -5829,19 +6074,16 @@ } } }, - "/api/clusters/{clusterName}/proxies/{proxyName}/actions/need-reprov": { + "/api/clusters/{clusterName}/restic/snapshots": { "get": { - "description": "Check if the proxy service for a given cluster and proxy needs reprovisioning", - "consumes": [ - "application/json" - ], + "description": "This endpoint retrieves the backups for the specified cluster.", "produces": [ "application/json" ], "tags": [ - "Proxies" + "ClusterRestic" ], - "summary": "Check if Proxy Needs Reprovision", + "summary": "Retrieve backups for a specific cluster", "parameters": [ { "type": "string", @@ -5857,20 +6099,17 @@ "name": "clusterName", "in": "path", "required": true - }, - { - "type": "string", - "description": "Proxy Name", - "name": "proxyName", - "in": "path", - "required": true } ], "responses": { "200": { - "description": "Need reprov!", + "description": "List of backups", "schema": { - "type": "string" + "type": "array", + "items": { + "type": "object", + "additionalProperties": true + } } }, "403": { @@ -5884,29 +6123,20 @@ "schema": { "type": "string" } - }, - "503": { - "description": "No reprov needed!\" \"Not a Valid Server!", - "schema": { - "type": "string" - } } } } }, - 
"/api/clusters/{clusterName}/proxies/{proxyName}/actions/need-restart": { + "/api/clusters/{clusterName}/restic/stats": { "get": { - "description": "Check if the proxy service for a given cluster and proxy needs a restart", - "consumes": [ - "application/json" - ], + "description": "This endpoint retrieves the backup stats for the specified cluster.", "produces": [ "application/json" ], "tags": [ - "Proxies" + "ClusterRestic" ], - "summary": "Check if Proxy Needs Restart", + "summary": "Retrieve backup stats for a specific cluster", "parameters": [ { "type": "string", @@ -5922,20 +6152,16 @@ "name": "clusterName", "in": "path", "required": true - }, - { - "type": "string", - "description": "Proxy Name", - "name": "proxyName", - "in": "path", - "required": true } ], "responses": { "200": { - "description": "Need restart!", + "description": "List of backups", "schema": { - "type": "string" + "type": "array", + "items": { + "$ref": "#/definitions/backupmgr.BackupStat" + } } }, "403": { @@ -5949,29 +6175,20 @@ "schema": { "type": "string" } - }, - "503": { - "description": "No restart needed!\" \"Not a Valid Server!", - "schema": { - "type": "string" - } } } } }, - "/api/clusters/{clusterName}/proxies/{proxyName}/actions/provision": { - "post": { - "description": "Provision the proxy service for a given cluster and proxy", - "consumes": [ - "application/json" - ], + "/api/clusters/{clusterName}/restic/task-queue": { + "get": { + "description": "Gets the restic task queue for the specified cluster.", "produces": [ "application/json" ], "tags": [ - "Proxies" + "ClusterRestic" ], - "summary": "Provision Proxy Service", + "summary": "Get Restic Task Queue", "parameters": [ { "type": "string", @@ -5987,20 +6204,16 @@ "name": "clusterName", "in": "path", "required": true - }, - { - "type": "string", - "description": "Proxy Name", - "name": "proxyName", - "in": "path", - "required": true } ], "responses": { "200": { - "description": "Proxy Service Provisioned", + "description": "Task queue fetched", "schema": { - "type": "string" + "type": "array", + "items": { + "$ref": "#/definitions/backupmgr.ResticTask" + } } }, "403": { @@ -6010,7 +6223,7 @@ } }, "500": { - "description": "Cluster Not Found\" \"Server Not Found", + "description": "No cluster", "schema": { "type": "string" } @@ -6018,19 +6231,16 @@ } } }, - "/api/clusters/{clusterName}/proxies/{proxyName}/actions/staging/{isStaging}": { + "/api/clusters/{clusterName}/restic/task-queue/cancel/{taskID}": { "post": { - "description": "Set the proxy service for a given cluster and proxy to staging", - "consumes": [ - "application/json" - ], + "description": "Cancel the specified restic task for the specified cluster.", "produces": [ "application/json" ], "tags": [ - "Proxies" + "ClusterRestic" ], - "summary": "Set Staging", + "summary": "Cancel Restic Task", "parameters": [ { "type": "string", @@ -6049,22 +6259,15 @@ }, { "type": "string", - "description": "Proxy Name", - "name": "proxyName", - "in": "path", - "required": true - }, - { - "type": "string", - "description": "Is Staging", - "name": "isStaging", + "description": "Task ID", + "name": "taskID", "in": "path", "required": true } ], "responses": { "200": { - "description": "Proxy Service Set to Staging", + "description": "Task cancelled", "schema": { "type": "string" } @@ -6076,13 +6279,7 @@ } }, "500": { - "description": "Cluster Not Found\" \"Server Not Found", - "schema": { - "type": "string" - } - }, - "503": { - "description": "Not a Valid Server!", + "description": "No cluster", 
"schema": { "type": "string" } @@ -6090,19 +6287,16 @@ } } }, - "/api/clusters/{clusterName}/proxies/{proxyName}/actions/start": { + "/api/clusters/{clusterName}/restic/task-queue/move/{moveType}/{taskID}": { "post": { - "description": "Start the proxy service for a given cluster and proxy", - "consumes": [ - "application/json" - ], + "description": "Modify the restic task queue for the specified cluster.", "produces": [ "application/json" ], "tags": [ - "Proxies" + "ClusterRestic" ], - "summary": "Start Proxy Service", + "summary": "Modify Restic Task Queue", "parameters": [ { "type": "string", @@ -6120,16 +6314,28 @@ "required": true }, { + "enum": [ + "first", + "after", + "last" + ], "type": "string", - "description": "Proxy Name", - "name": "proxyName", + "description": "Move Type", + "name": "moveType", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Task ID", + "name": "taskID", "in": "path", "required": true } ], "responses": { "200": { - "description": "Proxy Service Started", + "description": "Task queue modified", "schema": { "type": "string" } @@ -6141,7 +6347,7 @@ } }, "500": { - "description": "Cluster Not Found\" \"Server Not Found", + "description": "No cluster", "schema": { "type": "string" } @@ -6149,19 +6355,16 @@ } } }, - "/api/clusters/{clusterName}/proxies/{proxyName}/actions/stop": { + "/api/clusters/{clusterName}/restic/task-queue/move/{moveType}/{taskID}/{afterID}": { "post": { - "description": "Stop the proxy service for a given cluster and proxy", - "consumes": [ - "application/json" - ], + "description": "Modify the restic task queue for the specified cluster.", "produces": [ "application/json" ], "tags": [ - "Proxies" + "ClusterRestic" ], - "summary": "Stop Proxy Service", + "summary": "Modify Restic Task Queue", "parameters": [ { "type": "string", @@ -6179,16 +6382,34 @@ "required": true }, { + "enum": [ + "first", + "after", + "last" + ], "type": "string", - "description": "Proxy Name", - "name": "proxyName", + "description": "Move Type", + "name": "moveType", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Task ID", + "name": "taskID", "in": "path", "required": true + }, + { + "type": "string", + "description": "After ID", + "name": "afterID", + "in": "path" } ], "responses": { "200": { - "description": "Proxy Service Stopped", + "description": "Task queue modified", "schema": { "type": "string" } @@ -6200,7 +6421,7 @@ } }, "500": { - "description": "Cluster Not Found\" \"Server Not Found", + "description": "No cluster", "schema": { "type": "string" } @@ -6208,19 +6429,16 @@ } } }, - "/api/clusters/{clusterName}/proxies/{proxyName}/actions/unprovision": { - "post": { - "description": "Unprovision the proxy service for a given cluster and proxy", - "consumes": [ - "application/json" - ], + "/api/clusters/{clusterName}/restic/task-queue/reset": { + "get": { + "description": "Empty the restic task queue for the specified cluster.", "produces": [ "application/json" ], "tags": [ - "Proxies" + "ClusterRestic" ], - "summary": "Unprovision Proxy Service", + "summary": "Reset Restic Task Queue", "parameters": [ { "type": "string", @@ -6236,18 +6454,11 @@ "name": "clusterName", "in": "path", "required": true - }, - { - "type": "string", - "description": "Proxy Name", - "name": "proxyName", - "in": "path", - "required": true } ], "responses": { "200": { - "description": "Proxy Service Unprovisioned", + "description": "Task queue reset", "schema": { "type": "string" } @@ -6259,7 +6470,7 @@ } }, "500": { 
- "description": "Cluster Not Found\" \"Server Not Found", + "description": "No cluster", "schema": { "type": "string" } @@ -6267,16 +6478,21 @@ } } }, - "/api/clusters/{clusterName}/queryrules": { - "get": { - "description": "This endpoint retrieves the query rules for the specified cluster.", + "/api/clusters/{clusterName}/restic/task-queue/resume": { + "post": { + "responses": {} + } + }, + "/api/clusters/{clusterName}/restic/unlock": { + "post": { + "description": "Unlocks the restic repository for the specified cluster.", "produces": [ "application/json" ], "tags": [ - "Cluster" + "ClusterRestic" ], - "summary": "Retrieve query rules for a specific cluster", + "summary": "Unlock Restic Repository", "parameters": [ { "type": "string", @@ -6296,13 +6512,9 @@ ], "responses": { "200": { - "description": "List of query rules", + "description": "Restic repository unlocked", "schema": { - "type": "array", - "items": { - "type": "object", - "additionalProperties": true - } + "type": "string" } }, "403": { @@ -17906,7 +18118,84 @@ } }, "definitions": { - "archiver.BackupStat": { + "backupmgr.BackupMetaMap": { + "type": "object" + }, + "backupmgr.BackupMetadata": { + "type": "object", + "properties": { + "backupMethod": { + "type": "integer" + }, + "backupStrategy": { + "type": "integer" + }, + "backupTool": { + "type": "string" + }, + "backupToolVersion": { + "type": "string" + }, + "binLogFileName": { + "type": "string" + }, + "binLogFilePos": { + "type": "integer" + }, + "binLogUuid": { + "type": "string" + }, + "checksum": { + "type": "string" + }, + "completed": { + "type": "boolean" + }, + "compressed": { + "type": "boolean" + }, + "dest": { + "type": "string" + }, + "encrypted": { + "type": "boolean" + }, + "encryptionAlgo": { + "type": "string" + }, + "encryptionKey": { + "type": "string" + }, + "endTime": { + "type": "string" + }, + "fileCount": { + "type": "integer" + }, + "id": { + "type": "integer" + }, + "previous": { + "type": "integer" + }, + "retentionDays": { + "type": "integer" + }, + "size": { + "type": "integer" + }, + "source": { + "type": "string" + }, + "splitUser": { + "type": "boolean" + }, + "startTime": { + "type": "string" + } + } + }, + "backupmgr.BackupStat": { "type": "object", "properties": { "total_blob_count": { @@ -17920,61 +18209,77 @@ } } }, - "archiver.ResticPurgeOption": { + "backupmgr.PointInTimeMeta": { + "type": "object", + "properties": { + "backup": { + "type": "integer", + "format": "int64" + }, + "isInPITR": { + "type": "boolean" + }, + "restoreTime": { + "type": "integer", + "format": "int64" + }, + "useBinlog": { + "type": "boolean" + } + } + }, + "backupmgr.ResticPurgeOption": { "type": "object", "properties": { - "keepDaily": { + "keep_daily": { "type": "integer" }, - "keepHourly": { + "keep_hourly": { "type": "integer" }, - "keepLast": { + "keep_last": { "type": "integer" }, - "keepMonthly": { + "keep_monthly": { "type": "integer" }, - "keepWeekly": { + "keep_weekly": { "type": "integer" }, - "keepWithin": { + "keep_within": { "type": "string" }, - "keepWithinDaily": { + "keep_within_daily": { "type": "string" }, - "keepWithinHourly": { + "keep_within_hourly": { "type": "string" }, - "keepWithinMonthly": { + "keep_within_monthly": { "type": "string" }, - "keepWithinWeekly": { + "keep_within_weekly": { "type": "string" }, - "keepWithinYearly": { + "keep_within_yearly": { "type": "string" }, - "keepYearly": { + "keep_yearly": { "type": "integer" + }, + "snapshot_id": { + "type": "string" } } }, - "archiver.ResticTask": { + "backupmgr.ResticTask": 
{ "type": "object", "properties": { "dir_path": { "type": "string" }, - "error_state": { - "$ref": "#/definitions/github_com_signal18_replication-manager_utils_state.State" - }, - "new_pass_file": { - "type": "string" - }, "opt": { - "$ref": "#/definitions/archiver.ResticPurgeOption" + "$ref": "#/definitions/backupmgr.ResticPurgeOption" }, "tags": { "type": "array", @@ -17986,23 +18291,27 @@ "type": "integer" }, "task_type": { - "$ref": "#/definitions/archiver.TaskType" + "$ref": "#/definitions/backupmgr.TaskType" } } }, - "archiver.TaskType": { + "backupmgr.TaskType": { "type": "integer", "enum": [ 0, 1, 2, - 3 + 3, + 4, + 5 ], "x-enum-varnames": [ + "InitTask", "FetchTask", - "PurgeTask", "BackupTask", - "UnlockTask" + "PurgeTask", + "UnlockTask", + "ChangePassTask" ] }, "cluster.APIUser": { @@ -18232,7 +18541,7 @@ } }, "backupList": { - "$ref": "#/definitions/config.BackupMetaMap" + "$ref": "#/definitions/backupmgr.BackupMetaMap" }, "backupStat": { "$ref": "#/definitions/repmanv3.BackupStat" @@ -18783,10 +19092,10 @@ "type": "object", "properties": { "logical": { - "$ref": "#/definitions/config.BackupMetadata" + "$ref": "#/definitions/backupmgr.BackupMetadata" }, "physical": { - "$ref": "#/definitions/config.BackupMetadata" + "$ref": "#/definitions/backupmgr.BackupMetadata" } } }, @@ -19185,7 +19494,7 @@ "$ref": "#/definitions/config.StringsMap" }, "pointInTimeMeta": { - "$ref": "#/definitions/config.PointInTimeMeta" + "$ref": "#/definitions/backupmgr.PointInTimeMeta" }, "port": { "type": "string" @@ -19484,80 +19793,6 @@ } } }, - "config.BackupMetaMap": { - "type": "object" - }, - "config.BackupMetadata": { - "type": "object", - "properties": { - "backupMethod": { - "type": "integer" - }, - "backupStrategy": { - "type": "integer" - }, - "backupTool": { - "type": "string" - }, - "backupToolVersion": { - "type": "string" - }, - "binLogFileName": { - "type": "string" - }, - "binLogFilePos": { - "type": "integer" - }, - "binLogUuid": { - "type": "string" - }, - "checksum": { - "type": "string" - }, - "completed": { - "type": "boolean" - }, - "compressed": { - "type": "boolean" - }, - "dest": { - "type": "string" - }, - "encrypted": { - "type": "boolean" - }, - "encryptionAlgo": { - "type": "string" - }, - "encryptionKey": { - "type": "string" - }, - "endTime": { - "type": "string" - }, - "id": { - "type": "integer" - }, - "previous": { - "type": "integer" - }, - "retentionDays": { - "type": "integer" - }, - "size": { - "type": "integer" - }, - "source": { - "type": "string" - }, - "splitUser": { - "type": "boolean" - }, - "startTime": { - "type": "string" - } - } - }, "config.ConfigVariableType": { "type": "object", "properties": { @@ -19740,25 +19975,6 @@ } } }, - "config.PointInTimeMeta": { - "type": "object", - "properties": { - "backup": { - "type": "integer", - "format": "int64" - }, - "isInPITR": { - "type": "boolean" - }, - "restoreTime": { - "type": "integer", - "format": "int64" - }, - "useBinlog": { - "type": "boolean" - } - } - }, "config.Role": { "type": "object", "properties": { @@ -20677,6 +20893,12 @@ "backupResticRepository": { "type": "string" }, + "backupResticRunQueueOnStartup": { + "type": "boolean" + }, + "backupResticSaveQueueOnShutdown": { + "type": "boolean" + }, "backupResticTimeout": { "type": "integer" }, @@ -21314,9 +21536,6 @@ "logAppLevel": { "type": "integer" }, - "logArchiveLevel": { - "type": "integer" - }, "logBackupStream": { "type": "boolean" }, @@ -21395,6 +21614,9 @@ "logProxyLevel": { "type": "integer" }, + "logResticLevel": { + "type": "integer" + }, 
"logRotateMaxAge": { "type": "integer" }, @@ -22701,26 +22923,6 @@ } } }, - "github_com_signal18_replication-manager_utils_state.State": { - "type": "object", - "properties": { - "errDesc": { - "type": "string" - }, - "errFrom": { - "type": "string" - }, - "errKey": { - "type": "string" - }, - "errType": { - "type": "string" - }, - "serverUrl": { - "type": "string" - } - } - }, "github_com_signal18_replication-manager_utils_version.Version": { "type": "object", "properties": { @@ -23415,6 +23617,9 @@ "level": { "type": "string" }, + "module": { + "type": "integer" + }, "text": { "type": "string" }, diff --git a/docs/swagger.yaml b/docs/swagger.yaml index 54aebffa1..0135b5c69 100644 --- a/docs/swagger.yaml +++ b/docs/swagger.yaml @@ -1,5 +1,56 @@ definitions: - archiver.BackupStat: + backupmgr.BackupMetaMap: + type: object + backupmgr.BackupMetadata: + properties: + backupMethod: + type: integer + backupStrategy: + type: integer + backupTool: + type: string + backupToolVersion: + type: string + binLogFileName: + type: string + binLogFilePos: + type: integer + binLogUuid: + type: string + checksum: + type: string + completed: + type: boolean + compressed: + type: boolean + dest: + type: string + encrypted: + type: boolean + encryptionAlgo: + type: string + encryptionKey: + type: string + endTime: + type: string + fileCount: + type: integer + id: + type: integer + previous: + type: integer + retentionDays: + type: integer + size: + type: integer + source: + type: string + splitUser: + type: boolean + startTime: + type: string + type: object + backupmgr.BackupStat: properties: total_blob_count: type: integer @@ -8,43 +59,54 @@ definitions: total_size: type: integer type: object - archiver.ResticPurgeOption: + backupmgr.PointInTimeMeta: properties: - keepDaily: + backup: + format: int64 + type: integer + isInPITR: + type: boolean + restoreTime: + format: int64 + type: integer + useBinlog: + type: boolean + type: object + backupmgr.ResticPurgeOption: + properties: + keep_daily: type: integer - keepHourly: + keep_hourly: type: integer - keepLast: + keep_last: type: integer - keepMonthly: + keep_monthly: type: integer - keepWeekly: + keep_weekly: type: integer - keepWithin: + keep_within: type: string - keepWithinDaily: + keep_within_daily: type: string - keepWithinHourly: + keep_within_hourly: type: string - keepWithinMonthly: + keep_within_monthly: type: string - keepWithinWeekly: + keep_within_weekly: type: string - keepWithinYearly: + keep_within_yearly: type: string - keepYearly: + keep_yearly: type: integer + snapshot_id: + type: string type: object - archiver.ResticTask: + backupmgr.ResticTask: properties: dir_path: type: string - error_state: - $ref: '#/definitions/github_com_signal18_replication-manager_utils_state.State' - new_pass_file: - type: string opt: - $ref: '#/definitions/archiver.ResticPurgeOption' + $ref: '#/definitions/backupmgr.ResticPurgeOption' tags: items: type: string @@ -52,20 +114,24 @@ definitions: task_id: type: integer task_type: - $ref: '#/definitions/archiver.TaskType' + $ref: '#/definitions/backupmgr.TaskType' type: object - archiver.TaskType: + backupmgr.TaskType: enum: - 0 - 1 - 2 - 3 + - 4 + - 5 type: integer x-enum-varnames: + - InitTask - FetchTask - - PurgeTask - BackupTask + - PurgeTask - UnlockTask + - ChangePassTask cluster.APIUser: properties: grants: @@ -216,7 +282,7 @@ definitions: $ref: '#/definitions/cluster.App' type: array backupList: - $ref: '#/definitions/config.BackupMetaMap' + $ref: '#/definitions/backupmgr.BackupMetaMap' backupStat: $ref: 
'#/definitions/repmanv3.BackupStat' blacklist: @@ -591,9 +657,9 @@ definitions: cluster.ServerBackupMeta: properties: logical: - $ref: '#/definitions/config.BackupMetadata' + $ref: '#/definitions/backupmgr.BackupMetadata' physical: - $ref: '#/definitions/config.BackupMetadata' + $ref: '#/definitions/backupmgr.BackupMetadata' type: object cluster.ServerDelayStat: properties: @@ -859,7 +925,7 @@ definitions: pfsInstruments: $ref: '#/definitions/config.StringsMap' pointInTimeMeta: - $ref: '#/definitions/config.PointInTimeMeta' + $ref: '#/definitions/backupmgr.PointInTimeMeta' port: type: string postgressDB: @@ -1058,55 +1124,6 @@ definitions: provAppTemplate: type: string type: object - config.BackupMetaMap: - type: object - config.BackupMetadata: - properties: - backupMethod: - type: integer - backupStrategy: - type: integer - backupTool: - type: string - backupToolVersion: - type: string - binLogFileName: - type: string - binLogFilePos: - type: integer - binLogUuid: - type: string - checksum: - type: string - completed: - type: boolean - compressed: - type: boolean - dest: - type: string - encrypted: - type: boolean - encryptionAlgo: - type: string - encryptionKey: - type: string - endTime: - type: string - id: - type: integer - previous: - type: integer - retentionDays: - type: integer - size: - type: integer - source: - type: string - splitUser: - type: boolean - startTime: - type: string - type: object config.ConfigVariableType: properties: available: @@ -1225,19 +1242,6 @@ definitions: volumename: type: string type: object - config.PointInTimeMeta: - properties: - backup: - format: int64 - type: integer - isInPITR: - type: boolean - restoreTime: - format: int64 - type: integer - useBinlog: - type: boolean - type: object config.Role: properties: enable: @@ -1853,6 +1857,10 @@ definitions: type: string backupResticRepository: type: string + backupResticRunQueueOnStartup: + type: boolean + backupResticSaveQueueOnShutdown: + type: boolean backupResticTimeout: type: integer backupRestoreMysqlUser: @@ -2278,8 +2286,6 @@ definitions: type: string logAppLevel: type: integer - logArchiveLevel: - type: integer logBackupStream: type: boolean logBackupStreamLevel: @@ -2332,6 +2338,8 @@ definitions: type: boolean logProxyLevel: type: integer + logResticLevel: + type: integer logRotateMaxAge: type: integer logRotateMaxBackup: @@ -3203,19 +3211,6 @@ definitions: withTarball: type: string type: object - github_com_signal18_replication-manager_utils_state.State: - properties: - errDesc: - type: string - errFrom: - type: string - errKey: - type: string - errType: - type: string - serverUrl: - type: string - type: object github_com_signal18_replication-manager_utils_version.Version: properties: dist: @@ -3678,6 +3673,8 @@ definitions: type: string level: type: string + module: + type: integer text: type: string timestamp: @@ -6624,7 +6621,7 @@ paths: summary: Get App Substitution Variables tags: - Apps - /api/clusters/{clusterName}/archives: + /api/clusters/{clusterName}/backups: get: description: This endpoint retrieves the backups for the specified cluster. parameters: @@ -6646,7 +6643,8 @@ paths: description: List of backups schema: items: - additionalProperties: true + additionalProperties: + $ref: '#/definitions/backupmgr.BackupMetadata' type: object type: array "403": @@ -6660,9 +6658,9 @@ paths: summary: Retrieve backups for a specific cluster tags: - ClusterBackups - /api/clusters/{clusterName}/archives/fetch: - post: - description: Fetches the restic backup for the specified cluster. 
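Alongside the package move, the purge options above switch from camelCase JSON keys (keepDaily) to snake_case (keep_daily) and gain snapshot_id, while ResticTask drops error_state and new_pass_file from the wire format. A hedged sketch of the structs behind these schemas; the JSON tags are pinned down by the swagger, the Go field names are assumptions:

```go
package backupmgr

// ResticPurgeOption mirrors the retention flags of `restic forget`;
// the snake_case JSON tags match the regenerated swagger schema.
type ResticPurgeOption struct {
	KeepLast          int    `json:"keep_last"`
	KeepHourly        int    `json:"keep_hourly"`
	KeepDaily         int    `json:"keep_daily"`
	KeepWeekly        int    `json:"keep_weekly"`
	KeepMonthly       int    `json:"keep_monthly"`
	KeepYearly        int    `json:"keep_yearly"`
	KeepWithin        string `json:"keep_within"`
	KeepWithinHourly  string `json:"keep_within_hourly"`
	KeepWithinDaily   string `json:"keep_within_daily"`
	KeepWithinWeekly  string `json:"keep_within_weekly"`
	KeepWithinMonthly string `json:"keep_within_monthly"`
	KeepWithinYearly  string `json:"keep_within_yearly"`
	SnapshotID        string `json:"snapshot_id"` // new: purge a single snapshot by ID
}

// ResticTask is one queued unit of work for the restic worker.
// TaskType is the enum sketched earlier; error_state and
// new_pass_file no longer appear in the serialized task.
type ResticTask struct {
	TaskID   int               `json:"task_id"`
	TaskType TaskType          `json:"task_type"`
	DirPath  string            `json:"dir_path"`
	Tags     []string          `json:"tags"`
	Opt      ResticPurgeOption `json:"opt"`
}
```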
+ /api/clusters/{clusterName}/backups/stats: + get: + description: This endpoint retrieves the backup stats for the specified cluster. parameters: - default: Bearer description: Insert your access token @@ -6679,9 +6677,11 @@ paths: - application/json responses: "200": - description: Archives fetch queued + description: List of backups schema: - type: string + items: + $ref: '#/definitions/backupmgr.BackupStat' + type: array "403": description: No valid ACL schema: @@ -6690,12 +6690,13 @@ paths: description: No cluster schema: type: string - summary: Fetch Archives + summary: Retrieve backup stats for a specific cluster tags: - ClusterBackups - /api/clusters/{clusterName}/archives/init: - post: - description: Inits the restic backup for the specified cluster. + /api/clusters/{clusterName}/certificates: + get: + description: This endpoint retrieves the client certificates for the specified + cluster. parameters: - default: Bearer description: Insert your access token @@ -6712,23 +6713,23 @@ paths: - application/json responses: "200": - description: Archives purge queued - schema: - type: string - "403": - description: No valid ACL + description: List of client certificates schema: - type: string + items: + additionalProperties: true + type: object + type: array "500": - description: No cluster + description: Internal Server Error schema: type: string - summary: Init Restic Backup + summary: Retrieve client certificates for a specific cluster tags: - - ClusterBackups - /api/clusters/{clusterName}/archives/init/{force}: - post: - description: Inits the restic backup for the specified cluster. + - ClusterCertificates + /api/clusters/{clusterName}/diffvariables: + get: + description: This endpoint retrieves the variable differences for the specified + cluster. parameters: - default: Bearer description: Insert your access token @@ -6741,19 +6742,15 @@ paths: name: clusterName required: true type: string - - description: Force init - enum: - - force - in: path - name: force - type: string produces: - application/json responses: "200": - description: Archives purge queued + description: List of variable differences schema: - type: string + items: + $ref: '#/definitions/cluster.VariableDiff' + type: array "403": description: No valid ACL schema: @@ -6762,12 +6759,14 @@ paths: description: No cluster schema: type: string - summary: Init Restic Backup + summary: Retrieve variable differences for a specific cluster tags: - - ClusterBackups - /api/clusters/{clusterName}/archives/purge: + - Cluster + /api/clusters/{clusterName}/docker/actions/registry-connect: post: - description: Purges the restic backup for the specified cluster. + consumes: + - application/json + description: Logs in to a Docker registry using the provided credentials. 
parameters: - default: Bearer description: Insert your access token @@ -6780,11 +6779,21 @@ paths: name: clusterName required: true type: string + - description: Docker Registry Login Form + in: body + name: body + required: true + schema: + $ref: '#/definitions/server.DockerRegistryLoginForm' produces: - application/json responses: "200": - description: Archives purge queued + description: Docker registry login successful + schema: + type: string + "400": + description: Error decoding request body schema: type: string "403": @@ -6792,15 +6801,18 @@ paths: schema: type: string "500": - description: No cluster + description: Error creating request" or "Error making request to Docker + registry" or "Docker registry login failed schema: type: string - summary: Purge Restic Backup + summary: Docker Registry Login tags: - - ClusterBackups - /api/clusters/{clusterName}/archives/restore-config/{force}: - post: - description: Restores the restic config for the specified cluster. + - Docker + /api/clusters/{clusterName}/docker/browse/{imageRef}: + get: + consumes: + - application/json + description: Lists files in a specified directory of a Docker image. parameters: - default: Bearer description: Insert your access token @@ -6813,17 +6825,20 @@ paths: name: clusterName required: true type: string - - default: noforce - description: Force Restore + - description: Docker Image Reference in: path - name: force + name: imageRef required: true type: string produces: - application/json responses: "200": - description: Archives restore config done + description: List of files in the directory + schema: + $ref: '#/definitions/treehelper.FileTreeCache' + "400": + description: Image reference or source directory not provided schema: type: string "403": @@ -6831,15 +6846,18 @@ paths: schema: type: string "500": - description: No cluster + description: Error listing files in image directory" or "Error encoding + JSON schema: type: string - summary: Restore Restic Config + summary: List Files in Docker Image Directory tags: - - ClusterBackups - /api/clusters/{clusterName}/archives/stats: - get: - description: This endpoint retrieves the backup stats for the specified cluster. + - Docker + /api/clusters/{clusterName}/ext-role/accept: + post: + consumes: + - application/json + description: This endpoint accepts external operations for the specified cluster. parameters: - default: Bearer description: Insert your access token @@ -6852,29 +6870,35 @@ paths: name: clusterName required: true type: string + - description: User Form + in: body + name: body + required: true + schema: + $ref: '#/definitions/server.CloudUserForm' produces: - application/json responses: "200": - description: List of backups + description: Email sent to sponsor! schema: - items: - $ref: '#/definitions/archiver.BackupStat' - type: array + type: string "403": description: No valid ACL schema: type: string "500": - description: No cluster + description: Error accepting subscription schema: type: string - summary: Retrieve backup stats for a specific cluster + summary: Accept external operations for a specific cluster tags: - - ClusterBackups - /api/clusters/{clusterName}/archives/task-queue: - get: - description: Gets the restic task queue for the specified cluster. + - Cloud18 + /api/clusters/{clusterName}/ext-role/quote: + post: + consumes: + - application/json + description: This endpoint quotes external operations for the specified cluster. 
parameters: - default: Bearer description: Insert your access token @@ -6887,29 +6911,35 @@ paths: name: clusterName required: true type: string + - description: User Form + in: body + name: body + required: true + schema: + $ref: '#/definitions/server.CloudUserForm' produces: - application/json responses: "200": - description: Task queue fetched + description: Email sent to sponsor! schema: - items: - $ref: '#/definitions/archiver.ResticTask' - type: array + type: string "403": description: No valid ACL schema: type: string "500": - description: No cluster + description: Error accepting external operations schema: type: string - summary: Get Archives Task Queue + summary: Quote external operations for a specific cluster tags: - - ClusterBackups - /api/clusters/{clusterName}/archives/task-queue/reset: - get: - description: Empty the restic task queue for the specified cluster. + - Cloud18 + /api/clusters/{clusterName}/ext-role/refuse: + post: + consumes: + - application/json + description: This endpoint rejects external operations for the specified cluster. parameters: - default: Bearer description: Insert your access token @@ -6922,11 +6952,17 @@ paths: name: clusterName required: true type: string + - description: User Form + in: body + name: body + required: true + schema: + $ref: '#/definitions/server.CloudUserForm' produces: - application/json responses: "200": - description: Task queue reset + description: Subscription removed! schema: type: string "403": @@ -6934,15 +6970,18 @@ paths: schema: type: string "500": - description: No cluster + description: Error removing subscription schema: type: string - summary: Reset Archives Task Queue + summary: Reject external operations for a specific cluster tags: - - ClusterBackups - /api/clusters/{clusterName}/archives/unlock: + - Cloud18 + /api/clusters/{clusterName}/ext-role/subscribe: post: - description: Unlocks the restic backup for the specified cluster. + consumes: + - application/json + description: This endpoint subscribes external operations for the specified + cluster. parameters: - default: Bearer description: Insert your access token @@ -6955,11 +6994,17 @@ paths: name: clusterName required: true type: string + - description: User Form + in: body + name: body + required: true + schema: + $ref: '#/definitions/server.CloudUserForm' produces: - application/json responses: "200": - description: Archives purge queued + description: Email sent to sponsor! schema: type: string "403": @@ -6967,15 +7012,16 @@ paths: schema: type: string "500": - description: No cluster + description: Error subscribing external operations schema: type: string - summary: Unlock Restic Backup + summary: subscribe external operations for a specific cluster tags: - - ClusterBackups - /api/clusters/{clusterName}/backups: + - Cloud18 + /api/clusters/{clusterName}/graphite-filterlist: get: - description: This endpoint retrieves the backups for the specified cluster. + description: This endpoint retrieves the Graphite filter list for the specified + cluster. 
parameters: - default: Bearer description: Insert your access token @@ -6992,12 +7038,40 @@ paths: - application/json responses: "200": - description: List of backups + description: List of Graphite filters schema: items: - additionalProperties: true - type: object + type: string type: array + "500": + description: Internal Server Error + schema: + type: string + summary: Retrieve Graphite filter list for a specific cluster + tags: + - ClusterGraphite + /api/clusters/{clusterName}/health: + get: + description: Get the health status of the specified cluster. + parameters: + - default: Bearer + description: Insert your access token + in: header + name: Authorization + required: true + type: string + - description: Cluster Name + in: path + name: clusterName + required: true + type: string + produces: + - application/json + responses: + "200": + description: Cluster health fetched + schema: + $ref: '#/definitions/peer.PeerHealth' "403": description: No valid ACL schema: @@ -7006,12 +7080,12 @@ paths: description: No cluster schema: type: string - summary: Retrieve backups for a specific cluster + summary: Get Cluster Health tags: - - ClusterBackups - /api/clusters/{clusterName}/backups/stats: + - ClusterHealth + /api/clusters/{clusterName}/is-in-errstate/{errstate}: get: - description: This endpoint retrieves the backup stats for the specified cluster. + description: Checks if the specified cluster is in an error state. parameters: - default: Bearer description: Insert your access token @@ -7024,15 +7098,18 @@ paths: name: clusterName required: true type: string + - description: State to check + in: path + name: state + required: true + type: string produces: - application/json responses: "200": - description: List of backups + description: true" or "false schema: - items: - $ref: '#/definitions/archiver.BackupStat' - type: array + type: string "403": description: No valid ACL schema: @@ -7041,13 +7118,12 @@ paths: description: No cluster schema: type: string - summary: Retrieve backup stats for a specific cluster + summary: Check if Cluster is in Error State tags: - - ClusterBackups - /api/clusters/{clusterName}/certificates: + - ClusterHealth + /api/clusters/{clusterName}/jobs: get: - description: This endpoint retrieves the client certificates for the specified - cluster. + description: This endpoint retrieves the job entries for the specified cluster. parameters: - default: Bearer description: Insert your access token @@ -7064,23 +7140,137 @@ paths: - application/json responses: "200": - description: List of client certificates + description: List of job entries schema: items: additionalProperties: true type: object type: array + "403": + description: No valid ACL + schema: + type: string "500": - description: Internal Server Error + description: Cluster Not Found schema: type: string - summary: Retrieve client certificates for a specific cluster + summary: Retrieve job entries for a specific cluster tags: - - ClusterCertificates - /api/clusters/{clusterName}/diffvariables: + - Cluster + /api/clusters/{clusterName}/jobs-log-level/{task}/{level}: get: - description: This endpoint retrieves the variable differences for the specified - cluster. + description: Checks if a specific log level is enabled for a given task in the + specified cluster. 
+ parameters: + - description: Cluster Name + in: path + name: clusterName + required: true + type: string + - description: Task Name + enum: + - xtrabackup + - mariabackup + - errorlog + - slowquery + - sqlerrorlog + - auditlog + - zfssnapback + - optimize + - reseedxtrabackup + - reseedmariabackup + - reseedmysqldump + - flashbackxtrabackup + - flashbackmariadbackup + - flashbackmysqldump + - stop + - restart + - start + - printdefault-current + - printdefault-dummy + - jobs-check + - jobs-upgrade + in: path + name: task + required: true + type: string + - description: Log Level + enum: + - ERROR + - WARN + - INFO + - DEBUG + in: path + name: level + required: true + type: string + produces: + - application/json + responses: + "200": + description: true" or "false + schema: + type: string + "403": + description: No valid ACL + schema: + type: string + "500": + description: No cluster + schema: + type: string + summary: Check Cluster Log Level + tags: + - ClusterLogging + /api/clusters/{clusterName}/need-rolling-reprov: + get: + description: Checks if a specified cluster needs a rolling reprovision. + parameters: + - description: Cluster Name + in: path + name: clusterName + required: true + type: string + produces: + - text/plain + responses: + "200": + description: 200 -Need rolling reprov! + schema: + type: string + "500": + description: 503 -No rolling reprov needed!" or "500 -No cluster + schema: + type: string + summary: Check if a cluster needs a rolling reprovision + tags: + - Database + /api/clusters/{clusterName}/need-rolling-restart: + get: + description: Checks if a specified cluster needs a rolling restart. + parameters: + - description: Cluster Name + in: path + name: clusterName + required: true + type: string + produces: + - text/plain + responses: + "200": + description: 200 -Need rolling restart! + schema: + type: string + "500": + description: 503 -No rolling restart needed!" or "500 -No cluster + schema: + type: string + summary: Check if a cluster needs a rolling restart + tags: + - Database + /api/clusters/{clusterName}/opensvc-gateway: + get: + description: Retrieves the gateway nodes of the specified cluster. parameters: - default: Bearer description: Insert your access token @@ -7097,27 +7287,25 @@ paths: - application/json responses: "200": - description: List of variable differences + description: List of gateway nodes schema: items: - $ref: '#/definitions/cluster.VariableDiff' + type: string type: array "403": description: No valid ACL schema: type: string "500": - description: No cluster + description: No cluster" or "Error getting gateway nodes schema: type: string - summary: Retrieve variable differences for a specific cluster + summary: Get Cluster Gateway Nodes tags: - - Cluster - /api/clusters/{clusterName}/docker/actions/registry-connect: - post: - consumes: - - application/json - description: Logs in to a Docker registry using the provided credentials. + - ClusterGateway + /api/clusters/{clusterName}/opensvc-stats: + get: + description: Retrieves the OpenSVC daemon status of the specified cluster. 
parameters: - default: Bearer description: Insert your access token @@ -7130,40 +7318,29 @@ paths: name: clusterName required: true type: string - - description: Docker Registry Login Form - in: body - name: body - required: true - schema: - $ref: '#/definitions/server.DockerRegistryLoginForm' produces: - application/json responses: "200": - description: Docker registry login successful - schema: - type: string - "400": - description: Error decoding request body + description: OpenSVC daemon status fetched schema: - type: string + items: + $ref: '#/definitions/opensvc.DaemonNodeStats' + type: array "403": description: No valid ACL schema: type: string "500": - description: Error creating request" or "Error making request to Docker - registry" or "Docker registry login failed + description: No cluster" or "Error getting OpenSVC stats schema: type: string - summary: Docker Registry Login + summary: Get OpenSVC Daemon Status tags: - - Docker - /api/clusters/{clusterName}/docker/browse/{imageRef}: + - ClusterGateway + /api/clusters/{clusterName}/proxies/{proxyName}: get: - consumes: - - application/json - description: Lists files in a specified directory of a Docker image. + description: Shows the proxies for that specific named cluster parameters: - default: Bearer description: Insert your access token @@ -7176,39 +7353,24 @@ paths: name: clusterName required: true type: string - - description: Docker Image Reference - in: path - name: imageRef - required: true - type: string - produces: - - application/json responses: "200": - description: List of files in the directory - schema: - $ref: '#/definitions/treehelper.FileTreeCache' - "400": - description: Image reference or source directory not provided - schema: - type: string - "403": - description: No valid ACL + description: Server details retrieved successfully schema: - type: string + $ref: '#/definitions/cluster.Proxy' "500": - description: Error listing files in image directory" or "Error encoding - JSON + description: Internal Server Error schema: type: string - summary: List Files in Docker Image Directory + summary: Shows the proxies for that specific named cluster tags: - - Docker - /api/clusters/{clusterName}/ext-role/accept: - post: + - Proxies + /api/clusters/{clusterName}/proxies/{proxyName}/actions/need-reprov: + get: consumes: - application/json - description: This endpoint accepts external operations for the specified cluster. + description: Check if the proxy service for a given cluster and proxy needs + reprovisioning parameters: - default: Bearer description: Insert your access token @@ -7221,17 +7383,16 @@ paths: name: clusterName required: true type: string - - description: User Form - in: body - name: body + - description: Proxy Name + in: path + name: proxyName required: true - schema: - $ref: '#/definitions/server.CloudUserForm' + type: string produces: - application/json responses: "200": - description: Email sent to sponsor! + description: Need reprov! schema: type: string "403": @@ -7239,17 +7400,22 @@ paths: schema: type: string "500": - description: Error accepting subscription + description: No cluster schema: type: string - summary: Accept external operations for a specific cluster + "503": + description: No reprov needed!" "Not a Valid Server! 
+ schema: + type: string + summary: Check if Proxy Needs Reprovision tags: - - Cloud18 - /api/clusters/{clusterName}/ext-role/quote: - post: + - Proxies + /api/clusters/{clusterName}/proxies/{proxyName}/actions/need-restart: + get: consumes: - application/json - description: This endpoint quotes external operations for the specified cluster. + description: Check if the proxy service for a given cluster and proxy needs + a restart parameters: - default: Bearer description: Insert your access token @@ -7262,17 +7428,16 @@ paths: name: clusterName required: true type: string - - description: User Form - in: body - name: body + - description: Proxy Name + in: path + name: proxyName required: true - schema: - $ref: '#/definitions/server.CloudUserForm' + type: string produces: - application/json responses: "200": - description: Email sent to sponsor! + description: Need restart! schema: type: string "403": @@ -7280,17 +7445,21 @@ paths: schema: type: string "500": - description: Error accepting external operations + description: No cluster schema: type: string - summary: Quote external operations for a specific cluster + "503": + description: No restart needed!" "Not a Valid Server! + schema: + type: string + summary: Check if Proxy Needs Restart tags: - - Cloud18 - /api/clusters/{clusterName}/ext-role/refuse: + - Proxies + /api/clusters/{clusterName}/proxies/{proxyName}/actions/provision: post: consumes: - application/json - description: This endpoint rejects external operations for the specified cluster. + description: Provision the proxy service for a given cluster and proxy parameters: - default: Bearer description: Insert your access token @@ -7303,17 +7472,16 @@ paths: name: clusterName required: true type: string - - description: User Form - in: body - name: body + - description: Proxy Name + in: path + name: proxyName required: true - schema: - $ref: '#/definitions/server.CloudUserForm' + type: string produces: - application/json responses: "200": - description: Subscription removed! + description: Proxy Service Provisioned schema: type: string "403": @@ -7321,18 +7489,17 @@ paths: schema: type: string "500": - description: Error removing subscription + description: Cluster Not Found" "Server Not Found schema: type: string - summary: Reject external operations for a specific cluster + summary: Provision Proxy Service tags: - - Cloud18 - /api/clusters/{clusterName}/ext-role/subscribe: + - Proxies + /api/clusters/{clusterName}/proxies/{proxyName}/actions/staging/{isStaging}: post: consumes: - application/json - description: This endpoint subscribes external operations for the specified - cluster. + description: Set the proxy service for a given cluster and proxy to staging parameters: - default: Bearer description: Insert your access token @@ -7345,17 +7512,21 @@ paths: name: clusterName required: true type: string - - description: User Form - in: body - name: body + - description: Proxy Name + in: path + name: proxyName required: true - schema: - $ref: '#/definitions/server.CloudUserForm' + type: string + - description: Is Staging + in: path + name: isStaging + required: true + type: string produces: - application/json responses: "200": - description: Email sent to sponsor! 
+ description: Proxy Service Set to Staging schema: type: string "403": @@ -7363,16 +7534,21 @@ paths: schema: type: string "500": - description: Error subscribing external operations + description: Cluster Not Found" "Server Not Found schema: type: string - summary: subscribe external operations for a specific cluster + "503": + description: Not a Valid Server! + schema: + type: string + summary: Set Staging tags: - - Cloud18 - /api/clusters/{clusterName}/graphite-filterlist: - get: - description: This endpoint retrieves the Graphite filter list for the specified - cluster. + - Proxies + /api/clusters/{clusterName}/proxies/{proxyName}/actions/start: + post: + consumes: + - application/json + description: Start the proxy service for a given cluster and proxy parameters: - default: Bearer description: Insert your access token @@ -7385,25 +7561,34 @@ paths: name: clusterName required: true type: string + - description: Proxy Name + in: path + name: proxyName + required: true + type: string produces: - application/json responses: "200": - description: List of Graphite filters + description: Proxy Service Started schema: - items: - type: string - type: array + type: string + "403": + description: No valid ACL + schema: + type: string "500": - description: Internal Server Error + description: Cluster Not Found" "Server Not Found schema: type: string - summary: Retrieve Graphite filter list for a specific cluster + summary: Start Proxy Service tags: - - ClusterGraphite - /api/clusters/{clusterName}/health: - get: - description: Get the health status of the specified cluster. + - Proxies + /api/clusters/{clusterName}/proxies/{proxyName}/actions/stop: + post: + consumes: + - application/json + description: Stop the proxy service for a given cluster and proxy parameters: - default: Bearer description: Insert your access token @@ -7416,27 +7601,34 @@ paths: name: clusterName required: true type: string + - description: Proxy Name + in: path + name: proxyName + required: true + type: string produces: - application/json responses: "200": - description: Cluster health fetched + description: Proxy Service Stopped schema: - $ref: '#/definitions/peer.PeerHealth' + type: string "403": description: No valid ACL schema: type: string "500": - description: No cluster + description: Cluster Not Found" "Server Not Found schema: type: string - summary: Get Cluster Health + summary: Stop Proxy Service tags: - - ClusterHealth - /api/clusters/{clusterName}/is-in-errstate/{errstate}: - get: - description: Checks if the specified cluster is in an error state. 
+ - Proxies + /api/clusters/{clusterName}/proxies/{proxyName}/actions/unprovision: + post: + consumes: + - application/json + description: Unprovision the proxy service for a given cluster and proxy parameters: - default: Bearer description: Insert your access token @@ -7449,16 +7641,16 @@ paths: name: clusterName required: true type: string - - description: State to check + - description: Proxy Name in: path - name: state + name: proxyName required: true type: string produces: - application/json responses: "200": - description: true" or "false + description: Proxy Service Unprovisioned schema: type: string "403": @@ -7466,15 +7658,15 @@ paths: schema: type: string "500": - description: No cluster + description: Cluster Not Found" "Server Not Found schema: type: string - summary: Check if Cluster is in Error State + summary: Unprovision Proxy Service tags: - - ClusterHealth - /api/clusters/{clusterName}/jobs: + - Proxies + /api/clusters/{clusterName}/queryrules: get: - description: This endpoint retrieves the job entries for the specified cluster. + description: This endpoint retrieves the query rules for the specified cluster. parameters: - default: Bearer description: Insert your access token @@ -7491,7 +7683,7 @@ paths: - application/json responses: "200": - description: List of job entries + description: List of query rules schema: items: additionalProperties: true @@ -7501,127 +7693,82 @@ paths: description: No valid ACL schema: type: string - "500": - description: Cluster Not Found - schema: - type: string - summary: Retrieve job entries for a specific cluster - tags: - - Cluster - /api/clusters/{clusterName}/jobs-log-level/{task}/{level}: - get: - description: Checks if a specific log level is enabled for a given task in the - specified cluster. - parameters: - - description: Cluster Name - in: path - name: clusterName - required: true - type: string - - description: Task Name - enum: - - xtrabackup - - mariabackup - - errorlog - - slowquery - - sqlerrorlog - - auditlog - - zfssnapback - - optimize - - reseedxtrabackup - - reseedmariabackup - - reseedmysqldump - - flashbackxtrabackup - - flashbackmariadbackup - - flashbackmysqldump - - stop - - restart - - start - - printdefault-current - - printdefault-dummy - - jobs-check - - jobs-upgrade - in: path - name: task - required: true - type: string - - description: Log Level - enum: - - ERROR - - WARN - - INFO - - DEBUG - in: path - name: level - required: true - type: string - produces: - - application/json - responses: - "200": - description: true" or "false - schema: - type: string - "403": - description: No valid ACL - schema: - type: string "500": description: No cluster schema: type: string - summary: Check Cluster Log Level - tags: - - ClusterLogging - /api/clusters/{clusterName}/need-rolling-reprov: - get: - description: Checks if a specified cluster needs a rolling reprovision. + summary: Retrieve query rules for a specific cluster + tags: + - Cluster + /api/clusters/{clusterName}/restic/fetch: + post: + description: Fetches the restic backup for the specified cluster. parameters: + - default: Bearer + description: Insert your access token + in: header + name: Authorization + required: true + type: string - description: Cluster Name in: path name: clusterName required: true type: string produces: - - text/plain + - application/json responses: "200": - description: 200 -Need rolling reprov! 
+ description: Restic snapshots fetch queued + schema: + type: string + "403": + description: No valid ACL schema: type: string "500": - description: 503 -No rolling reprov needed!" or "500 -No cluster + description: No cluster schema: type: string - summary: Check if a cluster needs a rolling reprovision + summary: Fetch Restic Snapshots tags: - - Database - /api/clusters/{clusterName}/need-rolling-restart: - get: - description: Checks if a specified cluster needs a rolling restart. + - ClusterRestic + /api/clusters/{clusterName}/restic/init: + post: + description: Inits the restic repository for the specified cluster. parameters: + - default: Bearer + description: Insert your access token + in: header + name: Authorization + required: true + type: string - description: Cluster Name in: path name: clusterName required: true type: string produces: - - text/plain + - application/json responses: "200": - description: 200 -Need rolling restart! + description: Restic repository initialized + schema: + type: string + "403": + description: No valid ACL schema: type: string "500": - description: 503 -No rolling restart needed!" or "500 -No cluster + description: No cluster schema: type: string - summary: Check if a cluster needs a rolling restart + summary: Init Restic Repository tags: - - Database - /api/clusters/{clusterName}/opensvc-gateway: - get: - description: Retrieves the gateway nodes of the specified cluster. + - ClusterRestic + /api/clusters/{clusterName}/restic/init/{force}: + post: + description: Inits the restic repository for the specified cluster. parameters: - default: Bearer description: Insert your access token @@ -7634,29 +7781,33 @@ paths: name: clusterName required: true type: string + - description: Force init + enum: + - force + in: path + name: force + type: string produces: - application/json responses: "200": - description: List of gateway nodes + description: Restic repository initialized schema: - items: - type: string - type: array + type: string "403": description: No valid ACL schema: type: string "500": - description: No cluster" or "Error getting gateway nodes + description: No cluster schema: type: string - summary: Get Cluster Gateway Nodes + summary: Init Restic Repository tags: - - ClusterGateway - /api/clusters/{clusterName}/opensvc-stats: - get: - description: Retrieves the OpenSVC daemon status of the specified cluster. + - ClusterRestic + /api/clusters/{clusterName}/restic/purge/{snapshotID}: + post: + description: Purges the restic backup for the specified cluster. parameters: - default: Bearer description: Insert your access token @@ -7669,29 +7820,32 @@ paths: name: clusterName required: true type: string + - description: Snapshot ID + in: path + name: snapshotID + required: true + type: string produces: - application/json responses: "200": - description: OpenSVC daemon status fetched + description: Restic repository purged schema: - items: - $ref: '#/definitions/opensvc.DaemonNodeStats' - type: array + type: string "403": description: No valid ACL schema: type: string "500": - description: No cluster" or "Error getting OpenSVC stats + description: No cluster schema: type: string - summary: Get OpenSVC Daemon Status + summary: Purge Restic Backup tags: - - ClusterGateway - /api/clusters/{clusterName}/proxies/{proxyName}: - get: - description: Shows the proxies for that specific named cluster + - ClusterRestic + /api/clusters/{clusterName}/restic/restore-config/{force}: + post: + description: Restores the restic config for the specified cluster. 
parameters: - default: Bearer description: Insert your access token @@ -7704,24 +7858,33 @@ paths: name: clusterName required: true type: string + - default: noforce + description: Force Restore + in: path + name: force + required: true + type: string + produces: + - application/json responses: "200": - description: Server details retrieved successfully + description: Restic config restore done schema: - $ref: '#/definitions/cluster.Proxy' + type: string + "403": + description: No valid ACL + schema: + type: string "500": - description: Internal Server Error + description: No cluster schema: type: string - summary: Shows the proxies for that specific named cluster + summary: Restore Restic Config tags: - - Proxies - /api/clusters/{clusterName}/proxies/{proxyName}/actions/need-reprov: + - ClusterRestic + /api/clusters/{clusterName}/restic/snapshots: get: - consumes: - - application/json - description: Check if the proxy service for a given cluster and proxy needs - reprovisioning + description: This endpoint retrieves the backups for the specified cluster. parameters: - default: Bearer description: Insert your access token @@ -7734,18 +7897,16 @@ paths: name: clusterName required: true type: string - - description: Proxy Name - in: path - name: proxyName - required: true - type: string produces: - application/json responses: "200": - description: Need reprov! + description: List of backups schema: - type: string + items: + additionalProperties: true + type: object + type: array "403": description: No valid ACL schema: @@ -7754,19 +7915,12 @@ paths: description: No cluster schema: type: string - "503": - description: No reprov needed!" "Not a Valid Server! - schema: - type: string - summary: Check if Proxy Needs Reprovision + summary: Retrieve backups for a specific cluster tags: - - Proxies - /api/clusters/{clusterName}/proxies/{proxyName}/actions/need-restart: + - ClusterRestic + /api/clusters/{clusterName}/restic/stats: get: - consumes: - - application/json - description: Check if the proxy service for a given cluster and proxy needs - a restart + description: This endpoint retrieves the backup stats for the specified cluster. parameters: - default: Bearer description: Insert your access token @@ -7779,18 +7933,15 @@ paths: name: clusterName required: true type: string - - description: Proxy Name - in: path - name: proxyName - required: true - type: string produces: - application/json responses: "200": - description: Need restart! + description: List of backups schema: - type: string + items: + $ref: '#/definitions/backupmgr.BackupStat' + type: array "403": description: No valid ACL schema: @@ -7799,18 +7950,12 @@ paths: description: No cluster schema: type: string - "503": - description: No restart needed!" "Not a Valid Server! - schema: - type: string - summary: Check if Proxy Needs Restart + summary: Retrieve backup stats for a specific cluster tags: - - Proxies - /api/clusters/{clusterName}/proxies/{proxyName}/actions/provision: - post: - consumes: - - application/json - description: Provision the proxy service for a given cluster and proxy + - ClusterRestic + /api/clusters/{clusterName}/restic/task-queue: + get: + description: Gets the restic task queue for the specified cluster. 
parameters: - default: Bearer description: Insert your access token @@ -7823,34 +7968,29 @@ paths: name: clusterName required: true type: string - - description: Proxy Name - in: path - name: proxyName - required: true - type: string produces: - application/json responses: "200": - description: Proxy Service Provisioned + description: Task queue fetched schema: - type: string + items: + $ref: '#/definitions/backupmgr.ResticTask' + type: array "403": description: No valid ACL schema: type: string "500": - description: Cluster Not Found" "Server Not Found + description: No cluster schema: type: string - summary: Provision Proxy Service + summary: Get Restic Task Queue tags: - - Proxies - /api/clusters/{clusterName}/proxies/{proxyName}/actions/staging/{isStaging}: + - ClusterRestic + /api/clusters/{clusterName}/restic/task-queue/cancel/{taskID}: post: - consumes: - - application/json - description: Set the proxy service for a given cluster and proxy to staging + description: Cancel the specified restic task for the specified cluster. parameters: - default: Bearer description: Insert your access token @@ -7863,21 +8003,16 @@ paths: name: clusterName required: true type: string - - description: Proxy Name - in: path - name: proxyName - required: true - type: string - - description: Is Staging + - description: Task ID in: path - name: isStaging + name: taskID required: true type: string produces: - application/json responses: "200": - description: Proxy Service Set to Staging + description: Task cancelled schema: type: string "403": @@ -7885,21 +8020,15 @@ paths: schema: type: string "500": - description: Cluster Not Found" "Server Not Found - schema: - type: string - "503": - description: Not a Valid Server! + description: No cluster schema: type: string - summary: Set Staging + summary: Cancel Restic Task tags: - - Proxies - /api/clusters/{clusterName}/proxies/{proxyName}/actions/start: + - ClusterRestic + /api/clusters/{clusterName}/restic/task-queue/move/{moveType}/{taskID}: post: - consumes: - - application/json - description: Start the proxy service for a given cluster and proxy + description: Modify the restic task queue for the specified cluster. parameters: - default: Bearer description: Insert your access token @@ -7912,16 +8041,25 @@ paths: name: clusterName required: true type: string - - description: Proxy Name + - description: Move Type + enum: + - first + - after + - last in: path - name: proxyName + name: moveType + required: true + type: string + - description: Task ID + in: path + name: taskID required: true type: string produces: - application/json responses: "200": - description: Proxy Service Started + description: Task queue modified schema: type: string "403": @@ -7929,17 +8067,15 @@ paths: schema: type: string "500": - description: Cluster Not Found" "Server Not Found + description: No cluster schema: type: string - summary: Start Proxy Service + summary: Modify Restic Task Queue tags: - - Proxies - /api/clusters/{clusterName}/proxies/{proxyName}/actions/stop: + - ClusterRestic + /api/clusters/{clusterName}/restic/task-queue/move/{moveType}/{taskID}/{afterID}: post: - consumes: - - application/json - description: Stop the proxy service for a given cluster and proxy + description: Modify the restic task queue for the specified cluster. 
parameters: - default: Bearer description: Insert your access token @@ -7952,16 +8088,29 @@ paths: name: clusterName required: true type: string - - description: Proxy Name + - description: Move Type + enum: + - first + - after + - last in: path - name: proxyName + name: moveType + required: true + type: string + - description: Task ID + in: path + name: taskID required: true type: string + - description: After ID + in: path + name: afterID + type: string produces: - application/json responses: "200": - description: Proxy Service Stopped + description: Task queue modified schema: type: string "403": @@ -7969,17 +8118,15 @@ paths: schema: type: string "500": - description: Cluster Not Found" "Server Not Found + description: No cluster schema: type: string - summary: Stop Proxy Service + summary: Modify Restic Task Queue tags: - - Proxies - /api/clusters/{clusterName}/proxies/{proxyName}/actions/unprovision: - post: - consumes: - - application/json - description: Unprovision the proxy service for a given cluster and proxy + - ClusterRestic + /api/clusters/{clusterName}/restic/task-queue/reset: + get: + description: Empty the restic task queue for the specified cluster. parameters: - default: Bearer description: Insert your access token @@ -7992,16 +8139,11 @@ paths: name: clusterName required: true type: string - - description: Proxy Name - in: path - name: proxyName - required: true - type: string produces: - application/json responses: "200": - description: Proxy Service Unprovisioned + description: Task queue reset schema: type: string "403": @@ -8009,15 +8151,18 @@ paths: schema: type: string "500": - description: Cluster Not Found" "Server Not Found + description: No cluster schema: type: string - summary: Unprovision Proxy Service + summary: Reset Restic Task Queue tags: - - Proxies - /api/clusters/{clusterName}/queryrules: - get: - description: This endpoint retrieves the query rules for the specified cluster. + - ClusterRestic + /api/clusters/{clusterName}/restic/task-queue/resume: + post: + responses: {} + /api/clusters/{clusterName}/restic/unlock: + post: + description: Unlocks the restic repository for the specified cluster. 
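The new task-queue routes above give the queue first/after/last reordering plus cancel, pause, resume and reset. A hedged client-side sketch of the move call (base URL, port and token are placeholders; the afterID segment is only meaningful with moveType=after):

```go
package main

import (
	"fmt"
	"net/http"
)

// moveResticTask reorders a queued restic task via the API shown above.
// moveType is "first", "after" or "last"; afterID only applies to "after".
// base and token stand in for the repman API URL and a JWT.
func moveResticTask(base, token, cluster, moveType, taskID, afterID string) error {
	url := fmt.Sprintf("%s/api/clusters/%s/restic/task-queue/move/%s/%s", base, cluster, moveType, taskID)
	if moveType == "after" && afterID != "" {
		url += "/" + afterID // optional {afterID} path segment
	}
	req, err := http.NewRequest(http.MethodPost, url, nil)
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+token)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("queue move failed: %s", resp.Status)
	}
	return nil
}

func main() {
	// Move task 3 right after task 1 in cluster "mycluster".
	if err := moveResticTask("https://127.0.0.1:10005", "TOKEN", "mycluster", "after", "3", "1"); err != nil {
		fmt.Println(err)
	}
}
```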
parameters: - default: Bearer description: Insert your access token @@ -8034,12 +8179,9 @@ paths: - application/json responses: "200": - description: List of query rules + description: Restic repository unlocked schema: - items: - additionalProperties: true - type: object - type: array + type: string "403": description: No valid ACL schema: @@ -8048,9 +8190,9 @@ paths: description: No cluster schema: type: string - summary: Retrieve query rules for a specific cluster + summary: Unlock Restic Repository tags: - - Cluster + - ClusterRestic /api/clusters/{clusterName}/sales/accept-subscription: post: consumes: diff --git a/repmanv3/cluster_grpc.pb.go b/repmanv3/cluster_grpc.pb.go index d8dbca2c2..a5445ea8a 100644 --- a/repmanv3/cluster_grpc.pb.go +++ b/repmanv3/cluster_grpc.pb.go @@ -5,7 +5,8 @@ package repmanv3 import ( context "context" - archiver "github.com/signal18/replication-manager/utils/archiver" + backupmgr "github.com/signal18/replication-manager/utils/backupmgr" + dbhelper "github.com/signal18/replication-manager/utils/dbhelper" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" @@ -553,7 +554,7 @@ func _ClusterService_GetBackups_Handler(srv interface{}, stream grpc.ServerStrea } type ClusterService_GetBackupsServer interface { - Send(*archiver.Backup) error + Send(*backupmgr.BackupMetadata) error grpc.ServerStream } @@ -561,7 +562,7 @@ type clusterServiceGetBackupsServer struct { grpc.ServerStream } -func (x *clusterServiceGetBackupsServer) Send(m *archiver.Backup) error { +func (x *clusterServiceGetBackupsServer) Send(m *backupmgr.BackupMetadata) error { return x.ServerStream.SendMsg(m) } @@ -616,7 +617,7 @@ func _ClusterService_GetSchema_Handler(srv interface{}, stream grpc.ServerStream } type ClusterService_GetSchemaServer interface { - Send(*Table) error + Send(*dbhelper.Table) error grpc.ServerStream } @@ -624,7 +625,7 @@ type clusterServiceGetSchemaServer struct { grpc.ServerStream } -func (x *clusterServiceGetSchemaServer) Send(m *Table) error { +func (x *clusterServiceGetSchemaServer) Send(m *dbhelper.Table) error { return x.ServerStream.SendMsg(m) } diff --git a/server/api_cluster.go b/server/api_cluster.go index e0392b659..ef8a85420 100644 --- a/server/api_cluster.go +++ b/server/api_cluster.go @@ -103,68 +103,98 @@ func (repman *ReplicationManager) apiClusterProtectedHandler(router *mux.Router) negroni.Wrap(http.HandlerFunc(repman.handlerMuxClusterBackups)), )) + router.Handle("/api/clusters/{clusterName}/backups/stats", negroni.New( + negroni.HandlerFunc(repman.validateTokenMiddleware), + negroni.Wrap(http.HandlerFunc(repman.handlerMuxClusterBackupStat)), + )) + router.Handle("/api/clusters/{clusterName}/terminals", negroni.New( negroni.Wrap(http.HandlerFunc(repman.handlerGetTerminalSessionList)), )) - router.Handle("/api/clusters/{clusterName}/backups/stats", negroni.New( + router.Handle("/api/clusters/{clusterName}/restic/snapshots", negroni.New( negroni.HandlerFunc(repman.validateTokenMiddleware), - negroni.Wrap(http.HandlerFunc(repman.handlerMuxClusterBackupStats)), + negroni.Wrap(http.HandlerFunc(repman.handlerMuxClusterSnapshots)), )) - router.Handle("/api/clusters/{clusterName}/archives", negroni.New( + router.Handle("/api/clusters/{clusterName}/restic/stats", negroni.New( negroni.HandlerFunc(repman.validateTokenMiddleware), - negroni.Wrap(http.HandlerFunc(repman.handlerMuxClusterBackups)), + negroni.Wrap(http.HandlerFunc(repman.handlerMuxClusterSnapshotStat)), + )) + + 
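Every protected route in this hunk follows the same shape: a gorilla/mux route wrapped in a per-route negroni chain, with validateTokenMiddleware running before the wrapped handler. A self-contained sketch of that pattern, with a placeholder auth check and handler standing in for the real middleware:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
	"github.com/urfave/negroni"
)

func main() {
	router := mux.NewRouter()

	// Stand-in for repman.validateTokenMiddleware: reject the request
	// before the handler runs unless a bearer token is present.
	auth := negroni.HandlerFunc(func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
		if r.Header.Get("Authorization") == "" {
			http.Error(w, "No valid ACL", http.StatusForbidden)
			return
		}
		next(w, r) // token accepted: continue to the wrapped handler
	})

	// Placeholder for a real handler such as handlerMuxResticFetch.
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "cluster=%s\n", mux.Vars(r)["clusterName"])
	})

	// One fresh negroni chain per route, as in the registrations above.
	router.Handle("/api/clusters/{clusterName}/restic/fetch",
		negroni.New(auth, negroni.Wrap(handler)))

	http.ListenAndServe(":10005", router) // port is illustrative
}
```

The per-route chain is what lets an unauthenticated route like /terminals skip the token check while every restic route gets it.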
router.Handle("/api/clusters/{clusterName}/restic/fetch", negroni.New( + negroni.HandlerFunc(repman.validateTokenMiddleware), + negroni.Wrap(http.HandlerFunc(repman.handlerMuxResticFetch)), + )) + + router.Handle("/api/clusters/{clusterName}/restic/purge/{snapshotID}", negroni.New( + negroni.HandlerFunc(repman.validateTokenMiddleware), + negroni.Wrap(http.HandlerFunc(repman.handlerMuxResticPurge)), + )) + + router.Handle("/api/clusters/{clusterName}/restic/unlock", negroni.New( + negroni.HandlerFunc(repman.validateTokenMiddleware), + negroni.Wrap(http.HandlerFunc(repman.handlerMuxResticUnlock)), + )) + + router.Handle("/api/clusters/{clusterName}/restic/unlock/{force}", negroni.New( + negroni.HandlerFunc(repman.validateTokenMiddleware), + negroni.Wrap(http.HandlerFunc(repman.handlerMuxResticUnlock)), + )) + + router.Handle("/api/clusters/{clusterName}/restic/init", negroni.New( + negroni.HandlerFunc(repman.validateTokenMiddleware), + negroni.Wrap(http.HandlerFunc(repman.handlerMuxResticInitRepo)), )) - router.Handle("/api/clusters/{clusterName}/archives/stats", negroni.New( + router.Handle("/api/clusters/{clusterName}/restic/init/{force}", negroni.New( negroni.HandlerFunc(repman.validateTokenMiddleware), - negroni.Wrap(http.HandlerFunc(repman.handlerMuxClusterBackupStats)), + negroni.Wrap(http.HandlerFunc(repman.handlerMuxResticInitRepo)), )) - router.Handle("/api/clusters/{clusterName}/archives/fetch", negroni.New( + router.Handle("/api/clusters/{clusterName}/restic/task-queue", negroni.New( negroni.HandlerFunc(repman.validateTokenMiddleware), - negroni.Wrap(http.HandlerFunc(repman.handlerMuxArchivesFetch)), + negroni.Wrap(http.HandlerFunc(repman.handlerMuxGetResticTaskQueue)), )) - router.Handle("/api/clusters/{clusterName}/archives/purge", negroni.New( + router.Handle("/api/clusters/{clusterName}/restic/task-queue/resume", negroni.New( negroni.HandlerFunc(repman.validateTokenMiddleware), - negroni.Wrap(http.HandlerFunc(repman.handlerMuxArchivesPurge)), + negroni.Wrap(http.HandlerFunc(repman.handlerMuxResticTaskQueueResume)), )) - router.Handle("/api/clusters/{clusterName}/archives/unlock", negroni.New( + router.Handle("/api/clusters/{clusterName}/restic/task-queue/pause", negroni.New( negroni.HandlerFunc(repman.validateTokenMiddleware), - negroni.Wrap(http.HandlerFunc(repman.handlerMuxArchivesUnlock)), + negroni.Wrap(http.HandlerFunc(repman.handlerMuxResticTaskQueuePause)), )) - router.Handle("/api/clusters/{clusterName}/archives/init", negroni.New( + router.Handle("/api/clusters/{clusterName}/restic/task-queue/cancel/{taskID}", negroni.New( negroni.HandlerFunc(repman.validateTokenMiddleware), - negroni.Wrap(http.HandlerFunc(repman.handlerMuxArchivesInit)), + negroni.Wrap(http.HandlerFunc(repman.handlerMuxCancelResticTask)), )) - router.Handle("/api/clusters/{clusterName}/archives/init/{force}", negroni.New( + router.Handle("/api/clusters/{clusterName}/restic/task-queue/move/{moveType}/{taskID}", negroni.New( negroni.HandlerFunc(repman.validateTokenMiddleware), - negroni.Wrap(http.HandlerFunc(repman.handlerMuxArchivesInit)), + negroni.Wrap(http.HandlerFunc(repman.handlerMuxResticTaskQueueMove)), )) - router.Handle("/api/clusters/{clusterName}/archives/task-queue", negroni.New( + router.Handle("/api/clusters/{clusterName}/restic/task-queue/move/{moveType}/{taskID}/{afterID}", negroni.New( negroni.HandlerFunc(repman.validateTokenMiddleware), - negroni.Wrap(http.HandlerFunc(repman.handlerMuxGetArchivesTaskQueue)), + negroni.Wrap(http.HandlerFunc(repman.handlerMuxResticTaskQueueMove)), )) - 
router.Handle("/api/clusters/{clusterName}/archives/task-queue/reset", negroni.New( + router.Handle("/api/clusters/{clusterName}/restic/task-queue/reset", negroni.New( negroni.HandlerFunc(repman.validateTokenMiddleware), - negroni.Wrap(http.HandlerFunc(repman.handlerMuxResetArchivesTaskQueue)), + negroni.Wrap(http.HandlerFunc(repman.handlerMuxResetResticTaskQueue)), )) - router.Handle("/api/clusters/{clusterName}/archives/restore-config", negroni.New( + router.Handle("/api/clusters/{clusterName}/restic/restore-config", negroni.New( negroni.HandlerFunc(repman.validateTokenMiddleware), - negroni.Wrap(http.HandlerFunc(repman.handlerMuxArchivesRestoreConfig)), + negroni.Wrap(http.HandlerFunc(repman.handlerMuxResticRestoreConfig)), )) - router.Handle("/api/clusters/{clusterName}/archives/restore-config/{force}", negroni.New( + router.Handle("/api/clusters/{clusterName}/restic/restore-config/{force}", negroni.New( negroni.HandlerFunc(repman.validateTokenMiddleware), - negroni.Wrap(http.HandlerFunc(repman.handlerMuxArchivesRestoreConfig)), + negroni.Wrap(http.HandlerFunc(repman.handlerMuxResticRestoreConfig)), )) router.Handle("/api/clusters/{clusterName}/certificates", negroni.New( @@ -1907,11 +1937,10 @@ func (repman *ReplicationManager) handlerMuxClusterTags(w http.ResponseWriter, r // @Produce json // @Param Authorization header string true "Insert your access token" default(Bearer ) // @Param clusterName path string true "Cluster Name" -// @Success 200 {array} map[string]interface{} "List of backups" +// @Success 200 {array} map[int64]backupmgr.BackupMetadata "List of backups" // @Failure 403 {string} string "No valid ACL" // @Failure 500 {string} string "No cluster" // @Router /api/clusters/{clusterName}/backups [get] -// @Router /api/clusters/{clusterName}/archives [get] func (repman *ReplicationManager) handlerMuxClusterBackups(w http.ResponseWriter, r *http.Request) { w.Header().Set("Access-Control-Allow-Origin", "*") vars := mux.Vars(r) @@ -1934,19 +1963,18 @@ func (repman *ReplicationManager) handlerMuxClusterBackups(w http.ResponseWriter } } -// handlerMuxClusterBackupStats handles the retrieval of backup stats for a given cluster. +// handlerMuxClusterBackupStat handles the retrieval of backup stats for a given cluster. // @Summary Retrieve backup stats for a specific cluster // @Description This endpoint retrieves the backup stats for the specified cluster. 
// @Tags ClusterBackups // @Produce json // @Param Authorization header string true "Insert your access token" default(Bearer ) // @Param clusterName path string true "Cluster Name" -// @Success 200 {array} archiver.BackupStat "List of backups" +// @Success 200 {array} backupmgr.BackupStat "Backup stats" // @Failure 403 {string} string "No valid ACL" // @Failure 500 {string} string "No cluster" // @Router /api/clusters/{clusterName}/backups/stats [get] -// @Router /api/clusters/{clusterName}/archives/stats [get] -func (repman *ReplicationManager) handlerMuxClusterBackupStats(w http.ResponseWriter, r *http.Request) { +func (repman *ReplicationManager) handlerMuxClusterBackupStat(w http.ResponseWriter, r *http.Request) { w.Header().Set("Access-Control-Allow-Origin", "*") vars := mux.Vars(r) mycluster := repman.getClusterByName(vars["clusterName"]) @@ -2973,9 +3001,9 @@ func (repman *ReplicationManager) setClusterSetting(mycluster *cluster.Cluster, case "log-binlog-purge-level", "log-level-binlog-purge": val, _ := strconv.Atoi(value) mycluster.SetLogBinlogPurgeLevel(val) - case "log-archive-level", "log-level-archive": + case "log-archive-level", "log-level-restic": val, _ := strconv.Atoi(value) - mycluster.SetLogArchiveLevel(val) + mycluster.SetLogResticLevel(val) case "log-mailer-level", "log-level-mailer": val, _ := strconv.Atoi(value) mycluster.SetLogMailerLevel(val) @@ -3153,10 +3181,10 @@ func (repman *ReplicationManager) setClusterSetting(mycluster *cluster.Cluster, mycluster.LogModulePrintf(mycluster.Conf.Verbose, config.ConstLogModGeneral, config.LvlInfo, "Restic backup is already enabled, testing new password validity") // Test if new password is valid. If yes, can be used directly - err = mycluster.ResticRepo.ResticTestPassword(newval) + err = mycluster.ResticManager.TestPassword(newval) // If not valid, test if old password is valid (rotate password) if err != nil { - err2 := mycluster.ResticRepo.ResticTestPassword(mycluster.Conf.GetDecryptedValue("backup-restic-password")) + err2 := mycluster.ResticManager.TestPassword(mycluster.Conf.GetDecryptedValue("backup-restic-password")) if err2 == nil { mycluster.LogModulePrintf(mycluster.Conf.Verbose, config.ConstLogModGeneral, config.LvlInfo, "Old restic password is valid, rotating password to new one") @@ -5062,12 +5090,6 @@ func (repman *ReplicationManager) handlerMuxCluster(w http.ResponseWriter, r *ht return } - cl, err = sjson.SetBytes(cl, "backupList", mycluster.BackupMetaMap.ToNewMap()) - if err != nil { - http.Error(w, "Encoding error", 500) - return - } - // Reduce the content of the cluster object cl, _ = sjson.DeleteBytes(cl, "config.apps") cl, _ = sjson.DeleteBytes(cl, "servers") @@ -6705,71 +6727,52 @@ func (repman *ReplicationManager) handlerMuxRemoveExternalOps(w http.ResponseWri w.Write([]byte("Sponsor partnership removed!")) } -// handlerMuxArchivesRestoreConfig handles the HTTP request to restore the restic config for a given cluster. -// @Summary Restore Restic Config -// @Description Restores the restic config for the specified cluster. -// @Tags ClusterBackups +// handlerMuxClusterSnapshots handles the retrieval of restic snapshots for a given cluster. +// @Summary Retrieve restic snapshots for a specific cluster +// @Description This endpoint retrieves the restic snapshots for the specified cluster.
+// @Tags ClusterRestic // @Produce json // @Param Authorization header string true "Insert your access token" default(Bearer ) // @Param clusterName path string true "Cluster Name" -// @Param force path string true "Force Restore" Enum(force, noforce) default(noforce) -// @Success 200 {string} string "Archives restore config done" +// @Success 200 {array} map[string]interface{} "List of snapshots" // @Failure 403 {string} string "No valid ACL" // @Failure 500 {string} string "No cluster" -// @Router /api/clusters/{clusterName}/archives/restore-config/{force} [post] -func (repman *ReplicationManager) handlerMuxArchivesRestoreConfig(w http.ResponseWriter, r *http.Request) { +// @Router /api/clusters/{clusterName}/restic/snapshots [get] +func (repman *ReplicationManager) handlerMuxClusterSnapshots(w http.ResponseWriter, r *http.Request) { w.Header().Set("Access-Control-Allow-Origin", "*") - var force bool vars := mux.Vars(r) mycluster := repman.getClusterByName(vars["clusterName"]) - - if strings.ToLower(vars["force"]) == "force" { - force = true - } - if mycluster != nil { if valid, _ := repman.IsValidClusterACL(r, mycluster); !valid { http.Error(w, "No valid ACL", 403) return } - if !mycluster.Conf.BackupRestic { - http.Error(w, "Restic backup not enabled", 500) - return - } - - if mycluster.ResticRepo == nil { - http.Error(w, "No restic repo", 500) - return - } - - err := mycluster.RestoreResticConfig(force) + e := json.NewEncoder(w) + e.SetIndent("", "\t") + err := e.Encode(mycluster.GetSnapshots()) if err != nil { - http.Error(w, "Error restoring restic config: "+err.Error(), 500) + http.Error(w, "Encoding error", 500) return } } else { http.Error(w, "No cluster", 500) return } - - w.WriteHeader(http.StatusOK) - w.Write([]byte("Archives restore config done")) } -// handlerMuxArchivesFetch handles the HTTP request to fetch the restic snapshots for a given cluster. -// @Summary Fetch Archives -// @Description Fetches the restic backup for the specified cluster. -// @Tags ClusterBackups +// handlerMuxClusterSnapshotStat handles the retrieval of restic snapshot stats for a given cluster. +// @Summary Retrieve restic snapshot stats for a specific cluster +// @Description This endpoint retrieves the restic snapshot stats for the specified cluster.
+// @Tags ClusterRestic // @Produce json // @Param Authorization header string true "Insert your access token" default(Bearer ) // @Param clusterName path string true "Cluster Name" -// @Success 200 {string} string "Archives fetch queued" +// @Success 200 {array} backupmgr.BackupStat "Snapshot stats" // @Failure 403 {string} string "No valid ACL" // @Failure 500 {string} string "No cluster" -// @Router /api/clusters/{clusterName}/archives/fetch [post] -func (repman *ReplicationManager) handlerMuxArchivesFetch(w http.ResponseWriter, r *http.Request) { +// @Router /api/clusters/{clusterName}/restic/stats [get] +func (repman *ReplicationManager) handlerMuxClusterSnapshotStat(w http.ResponseWriter, r *http.Request) { w.Header().Set("Access-Control-Allow-Origin", "*") - vars := mux.Vars(r) mycluster := repman.getClusterByName(vars["clusterName"]) if mycluster != nil { @@ -6777,152 +6780,183 @@ func (repman *ReplicationManager) handlerMuxArchivesFetch(w http.ResponseWriter, http.Error(w, "No valid ACL", 403) return } - if !mycluster.Conf.BackupRestic { - http.Error(w, "Restic backup not enabled", 500) - return - } - - if mycluster.ResticRepo == nil { - http.Error(w, "No restic repo", 500) + e := json.NewEncoder(w) + e.SetIndent("", "\t") + err := e.Encode(mycluster.GetSnapshotStats()) + if err != nil { + http.Error(w, "Encoding error", 500) return } - - go mycluster.ResticFetchRepo() } else { http.Error(w, "No cluster", 500) return } - - w.WriteHeader(http.StatusOK) - w.Write([]byte("Archives fetch queued")) } -// handlerMuxArchivesPurge handles the HTTP request to purge the restic repo for a given cluster. -// @Summary Purge Restic Backup -// @Description Purges the restic backup for the specified cluster. -// @Tags ClusterBackups -// @Produce json -// @Param Authorization header string true "Insert your access token" default(Bearer ) -// @Param clusterName path string true "Cluster Name" -// @Success 200 {string} string "Archives purge queued" -// @Failure 403 {string} string "No valid ACL" -// @Failure 500 {string} string "No cluster" -// @Router /api/clusters/{clusterName}/archives/purge [post] -func (repman *ReplicationManager) handlerMuxArchivesPurge(w http.ResponseWriter, r *http.Request) { +// withResticCluster is a helper function to handle requests that require a restic-enabled cluster. +// It checks for cluster existence, ACL validity, and restic backup enablement before invoking the provided handler. +// If any checks fail, it responds with the appropriate HTTP error. +// Parameters: +// - w: http.ResponseWriter to write the response. +// - r: *http.Request representing the incoming request. +// - mustEnabled: bool indicating if restic backup must be enabled. +// - handler: function to handle the request if all checks pass. It receives the cluster and URL variables as parameters.
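+// +// Illustrative usage (a sketch, not part of this change): +// +//	func (repman *ReplicationManager) handlerMuxResticExample(w http.ResponseWriter, r *http.Request) { +//		repman.withResticCluster(w, r, true, func(mycluster *cluster.Cluster, vars map[string]string) { +//			w.WriteHeader(http.StatusOK) +//			w.Write([]byte("restic is enabled for " + mycluster.Name)) +//		}) +//	}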
+func (repman *ReplicationManager) withResticCluster( + w http.ResponseWriter, + r *http.Request, + mustEnabled bool, + handler func(cluster *cluster.Cluster, vars map[string]string), +) { w.Header().Set("Access-Control-Allow-Origin", "*") vars := mux.Vars(r) - mycluster := repman.getClusterByName(vars["clusterName"]) - if mycluster != nil { - if valid, _ := repman.IsValidClusterACL(r, mycluster); !valid { - http.Error(w, "No valid ACL", 403) - return - } - if !mycluster.Conf.BackupRestic { - http.Error(w, "Restic backup not enabled", 500) - return - } + cluster := repman.getClusterByName(vars["clusterName"]) + if cluster == nil { + http.Error(w, "No cluster", 500) + return + } - if mycluster.ResticRepo == nil { - http.Error(w, "No restic repo", 500) - return - } + if valid, _ := repman.IsValidClusterACL(r, cluster); !valid { + http.Error(w, "No valid ACL", 403) + return + } - go mycluster.ResticPurgeRepo() - } else { - http.Error(w, "No cluster", 500) + if mustEnabled && !cluster.Conf.BackupRestic { + http.Error(w, "Restic backup not enabled", 500) return } - w.WriteHeader(http.StatusOK) - w.Write([]byte("Archives purge queued")) + handler(cluster, vars) } -// handlerMuxArchivesUnlock handles the HTTP request to unlock restic repo for a given cluster. -// @Summary Unlock Restic Backup -// @Description Unlocks the restic backup for the specified cluster. -// @Tags ClusterBackups +// handlerMuxResticRestoreConfig handles the HTTP request to restore the restic config for a given cluster. +// @Summary Restore Restic Config +// @Description Restores the restic config for the specified cluster. +// @Tags ClusterRestic // @Produce json // @Param Authorization header string true "Insert your access token" default(Bearer ) // @Param clusterName path string true "Cluster Name" -// @Success 200 {string} string "Archives purge queued" +// @Param force path string true "Force Restore" Enum(force, noforce) default(noforce) +// @Success 200 {string} string "Restic config restore done" // @Failure 403 {string} string "No valid ACL" // @Failure 500 {string} string "No cluster" -// @Router /api/clusters/{clusterName}/archives/unlock [post] -func (repman *ReplicationManager) handlerMuxArchivesUnlock(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") - - vars := mux.Vars(r) - mycluster := repman.getClusterByName(vars["clusterName"]) - if mycluster != nil { - if valid, _ := repman.IsValidClusterACL(r, mycluster); !valid { - http.Error(w, "No valid ACL", 403) - return +// @Router /api/clusters/{clusterName}/restic/restore-config/{force} [post] +func (repman *ReplicationManager) handlerMuxResticRestoreConfig(w http.ResponseWriter, r *http.Request) { + repman.withResticCluster(w, r, true, func(mycluster *cluster.Cluster, vars map[string]string) { + var force bool + if vars["force"] == "force" { + force = true } - if !mycluster.Conf.BackupRestic { - http.Error(w, "Restic backup not enabled", 500) + err := mycluster.RestoreResticConfig(force) + if err != nil { + http.Error(w, "Error restoring restic config: "+err.Error(), 500) return } - if mycluster.ResticRepo == nil { - http.Error(w, "No restic repo", 500) - return - } + w.WriteHeader(http.StatusOK) + w.Write([]byte("Restic config restore done")) + }) +} - err := mycluster.ResticUnlockRepo() - if err != nil { - http.Error(w, "Error unlocking archives :"+err.Error(), 500) - return - } +// handlerMuxResticFetch handles the HTTP request to fetch the restic snapshots for a given cluster. 
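+// The fetch is performed through the restic task queue, as reflected by the "Restic snapshots fetch queued" response.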
+// @Summary Fetch Restic Snapshots +// @Description Fetches the restic snapshots for the specified cluster. +// @Tags ClusterRestic +// @Produce json +// @Param Authorization header string true "Insert your access token" default(Bearer ) +// @Param clusterName path string true "Cluster Name" +// @Success 200 {string} string "Restic snapshots fetch queued" +// @Failure 403 {string} string "No valid ACL" +// @Failure 500 {string} string "No cluster" +// @Router /api/clusters/{clusterName}/restic/fetch [post] +func (repman *ReplicationManager) handlerMuxResticFetch(w http.ResponseWriter, r *http.Request) { + repman.withResticCluster(w, r, true, func(mycluster *cluster.Cluster, vars map[string]string) { - } else { - http.Error(w, "No cluster", 500) - return - } + mycluster.ResticFetchRepo() - w.WriteHeader(http.StatusOK) - w.Write([]byte("Archives unlock queued")) + w.WriteHeader(http.StatusOK) + w.Write([]byte("Restic snapshots fetch queued")) + }) } -// handlerMuxArchivesInit handles the HTTP request to init restic repo for a given cluster. -// @Summary Init Restic Backup -// @Description Inits the restic backup for the specified cluster. -// @Tags ClusterBackups +// handlerMuxResticPurge handles the HTTP request to purge the restic repo for a given cluster. +// @Summary Purge Restic Backup +// @Description Purges the restic backup for the specified cluster. +// @Tags ClusterRestic // @Produce json // @Param Authorization header string true "Insert your access token" default(Bearer ) // @Param clusterName path string true "Cluster Name" -// @Param force path string false "Force init" Enums(force) -// @Success 200 {string} string "Archives purge queued" +// @Param snapshotID path string true "Snapshot ID, or 'policy' to purge according to the retention policy" +// @Success 200 {string} string "Restic repository purged" // @Failure 403 {string} string "No valid ACL" // @Failure 500 {string} string "No cluster" -// @Router /api/clusters/{clusterName}/archives/init [post] -// @Router /api/clusters/{clusterName}/archives/init/{force} [post] -func (repman *ReplicationManager) handlerMuxArchivesInit(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") +// @Router /api/clusters/{clusterName}/restic/purge/{snapshotID} [post] +func (repman *ReplicationManager) handlerMuxResticPurge(w http.ResponseWriter, r *http.Request) { + repman.withResticCluster(w, r, true, func(mycluster *cluster.Cluster, vars map[string]string) { - vars := mux.Vars(r) - mycluster := repman.getClusterByName(vars["clusterName"]) - if mycluster != nil { - if valid, _ := repman.IsValidClusterACL(r, mycluster); !valid { - http.Error(w, "No valid ACL", 403) + if vars["snapshotID"] == "" { + http.Error(w, "No snapshot ID provided, please provide one or use 'policy' to purge according to policy", 500) return } - - if !mycluster.Conf.BackupRestic { - http.Error(w, "Restic backup not enabled", 500) - return + if vars["snapshotID"] == "policy" { + err := mycluster.ResticPurgeRepo() + if err != nil { + http.Error(w, "Error purging restic repo: "+err.Error(), 500) + return + } + } else { + err := mycluster.AddPurgeTask(vars["snapshotID"]) + if err != nil { + http.Error(w, "Error adding purge task: "+err.Error(), 500) + return + } } - if mycluster.ResticRepo == nil { - http.Error(w, "No restic repo", 500) - return - } + w.WriteHeader(http.StatusOK) + w.Write([]byte("Restic repository purged")) + }) +} + +// handlerMuxResticUnlock handles the HTTP request to unlock the restic repository for a given cluster.
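+// Unlocking removes stale repository locks that interrupted restic processes may have left behind.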
+// @Summary Unlock Restic Repository +// @Description Unlocks the restic repository for the specified cluster. +// @Tags ClusterRestic +// @Produce json +// @Param Authorization header string true "Insert your access token" default(Bearer ) +// @Param clusterName path string true "Cluster Name" +// @Success 200 {string} string "Restic repository unlocked" +// @Failure 403 {string} string "No valid ACL" +// @Failure 500 {string} string "No cluster" +// @Router /api/clusters/{clusterName}/restic/unlock [post] +func (repman *ReplicationManager) handlerMuxResticUnlock(w http.ResponseWriter, r *http.Request) { + repman.withResticCluster(w, r, true, func(mycluster *cluster.Cluster, vars map[string]string) { + + mycluster.ResticUnlockRepo() + + w.WriteHeader(http.StatusOK) + w.Write([]byte("Restic repository unlocked")) + }) +} +// handlerMuxResticInitRepo handles the HTTP request to init restic repo for a given cluster. +// @Summary Init Restic Repository +// @Description Inits the restic repository for the specified cluster. +// @Tags ClusterRestic +// @Produce json +// @Param Authorization header string true "Insert your access token" default(Bearer ) +// @Param clusterName path string true "Cluster Name" +// @Param force path string false "Force init" Enums(force) +// @Success 200 {string} string "Restic repository initialized" +// @Failure 403 {string} string "No valid ACL" +// @Failure 500 {string} string "No cluster" +// @Router /api/clusters/{clusterName}/restic/init [post] +// @Router /api/clusters/{clusterName}/restic/init/{force} [post] +func (repman *ReplicationManager) handlerMuxResticInitRepo(w http.ResponseWriter, r *http.Request) { + repman.withResticCluster(w, r, true, func(mycluster *cluster.Cluster, vars map[string]string) { var force bool - v, ok := vars["force"] - if ok && v == "force" { + if vars["force"] == "force" { force = true } @@ -6932,46 +6966,24 @@ func (repman *ReplicationManager) handlerMuxArchivesInit(w http.ResponseWriter, return } - } else { - http.Error(w, "No cluster", 500) - return - } - - w.WriteHeader(http.StatusOK) - w.Write([]byte("Archives unlock queued")) + w.WriteHeader(http.StatusOK) + w.Write([]byte("Restic repository initialized")) + }) } -// handlerMuxGetArchivesTaskQueue handles the HTTP request to get the restic task queue for a given cluster. -// @Summary Get Archives Task Queue +// handlerMuxGetResticTaskQueue handles the HTTP request to get the restic task queue for a given cluster. +// @Summary Get Restic Task Queue // @Description Gets the restic task queue for the specified cluster. 
-// @Tags ClusterBackups +// @Tags ClusterRestic // @Produce json // @Param Authorization header string true "Insert your access token" default(Bearer ) // @Param clusterName path string true "Cluster Name" -// @Success 200 {array} archiver.ResticTask "Task queue fetched" +// @Success 200 {array} backupmgr.ResticTask "Task queue fetched" // @Failure 403 {string} string "No valid ACL" // @Failure 500 {string} string "No cluster" -// @Router /api/clusters/{clusterName}/archives/task-queue [get] -func (repman *ReplicationManager) handlerMuxGetArchivesTaskQueue(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") - - vars := mux.Vars(r) - mycluster := repman.getClusterByName(vars["clusterName"]) - if mycluster != nil { - if valid, _ := repman.IsValidClusterACL(r, mycluster); !valid { - http.Error(w, "No valid ACL", 403) - return - } - - if !mycluster.Conf.BackupRestic { - http.Error(w, "Restic backup not enabled", 500) - return - } - - if mycluster.ResticRepo == nil { - http.Error(w, "No restic repo", 500) - return - } +// @Router /api/clusters/{clusterName}/restic/task-queue [get] +func (repman *ReplicationManager) handlerMuxGetResticTaskQueue(w http.ResponseWriter, r *http.Request) { + repman.withResticCluster(w, r, false, func(mycluster *cluster.Cluster, vars map[string]string) { taskqueue, err := mycluster.ResticGetQueue() if err != nil { @@ -6989,56 +7001,138 @@ func (repman *ReplicationManager) handlerMuxGetArchivesTaskQueue(w http.Response w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) w.Write(taskqueueJSON) - } else { - http.Error(w, "No cluster", 500) - return - } + }) } -// handlerMuxResetArchivesTaskQueue handles the HTTP request to reset the restic task queue for a given cluster. -// @Summary Reset Archives Task Queue -// @Description Empty the restic task queue for the specified cluster. -// @Tags ClusterBackups +// handlerMuxResticTaskQueueResume handles the HTTP request to resume the restic task queue for a given cluster. +// @Summary Resume Restic Task Queue +// @Description Resumes processing of the restic task queue for the specified cluster. +// @Tags ClusterRestic +// @Produce json +// @Param Authorization header string true "Insert your access token" default(Bearer ) +// @Param clusterName path string true "Cluster Name" +// @Success 200 {string} string "Task queue resumed" +// @Failure 403 {string} string "No valid ACL" +// @Failure 500 {string} string "No cluster" +// @Router /api/clusters/{clusterName}/restic/task-queue/resume [post] +func (repman *ReplicationManager) handlerMuxResticTaskQueueResume(w http.ResponseWriter, r *http.Request) { + repman.withResticCluster(w, r, true, func(mycluster *cluster.Cluster, vars map[string]string) { + + mycluster.ResticRunQueue() + + w.WriteHeader(http.StatusOK) + w.Write([]byte("Task queue resumed")) + }) +} + +// handlerMuxResticTaskQueuePause handles the HTTP request to pause the restic task queue for a given cluster. +// @Summary Pause Restic Task Queue +// @Description Pauses processing of the restic task queue for the specified cluster. +// @Tags ClusterRestic +// @Produce json +// @Param Authorization header string true "Insert your access token" default(Bearer ) +// @Param clusterName path string true "Cluster Name" +// @Success 200 {string} string "Task queue paused" +// @Failure 403 {string} string "No valid ACL" +// @Failure 500 {string} string "No cluster" +// @Router /api/clusters/{clusterName}/restic/task-queue/pause [post] +func (repman *ReplicationManager) handlerMuxResticTaskQueuePause(w http.ResponseWriter, r *http.Request) { + repman.withResticCluster(w, r, false, func(mycluster *cluster.Cluster, vars map[string]string) { + + mycluster.ResticPauseQueue() + + w.WriteHeader(http.StatusOK) + w.Write([]byte("Task queue paused")) + }) +} + +// handlerMuxResticTaskQueueMove handles the HTTP request to modify the restic task queue for a given cluster. +// @Summary Modify Restic Task Queue +// @Description Modify the restic task queue for the specified cluster.
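+// moveType must be one of "first", "last" or "after"; with "after", afterID identifies the task the moved task is placed behind.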
+// @Tags ClusterRestic // @Produce json // @Param Authorization header string true "Insert your access token" default(Bearer ) // @Param clusterName path string true "Cluster Name" -// @Success 200 {string} string "Task queue reset" +// @Param moveType path backupmgr.MoveType true "Move Type" +// @Param taskID path string true "Task ID" +// @Param afterID path string false "After ID" +// @Success 200 {string} string "Task queue modified" // @Failure 403 {string} string "No valid ACL" // @Failure 500 {string} string "No cluster" -// @Router /api/clusters/{clusterName}/archives/task-queue/reset [get] -func (repman *ReplicationManager) handlerMuxResetArchivesTaskQueue(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") +// @Router /api/clusters/{clusterName}/restic/task-queue/move/{moveType}/{taskID} [post] +// @Router /api/clusters/{clusterName}/restic/task-queue/move/{moveType}/{taskID}/{afterID} [post] +func (repman *ReplicationManager) handlerMuxResticTaskQueueMove(w http.ResponseWriter, r *http.Request) { + repman.withResticCluster(w, r, false, func(mycluster *cluster.Cluster, vars map[string]string) { + moveType := vars["moveType"] + var taskID, afterID int + + // Parse ID to int + taskID, err := strconv.Atoi(vars["taskID"]) + if err != nil { + http.Error(w, "Invalid taskID", 500) + return + } - vars := mux.Vars(r) - mycluster := repman.getClusterByName(vars["clusterName"]) - if mycluster != nil { - if valid, _ := repman.IsValidClusterACL(r, mycluster); !valid { - http.Error(w, "No valid ACL", 403) + if moveType == "after" { + afterID, err = strconv.Atoi(vars["afterID"]) + if err != nil { + http.Error(w, "Invalid afterID", 500) + return + } + } + + switch moveType { + case "after", "first", "last": + err := mycluster.ResticModifyQueue(moveType, taskID, afterID) + if err != nil { + http.Error(w, "Error modifying task queue :"+err.Error(), 500) + return + } + default: + http.Error(w, "Invalid moveType. Must be one of: after, first, last", 500) return } - if !mycluster.Conf.BackupRestic { - http.Error(w, "Restic backup not enabled", 500) + w.WriteHeader(http.StatusOK) + w.Write([]byte("Task queue modified")) + }) +} + +// handlerMuxCancelResticTask handles the HTTP request to cancel a restic task for a given cluster. +// @Summary Cancel Restic Task +// @Description Cancel the specified restic task for the specified cluster. 
+// @Tags ClusterRestic +// @Produce json +// @Param Authorization header string true "Insert your access token" default(Bearer ) +// @Param clusterName path string true "Cluster Name" +// @Param taskID path string true "Task ID" +// @Success 200 {string} string "Task cancelled" +// @Failure 403 {string} string "No valid ACL" +// @Failure 500 {string} string "No cluster" +// @Router /api/clusters/{clusterName}/restic/task-queue/cancel/{taskID} [post] +func (repman *ReplicationManager) handlerMuxCancelResticTask(w http.ResponseWriter, r *http.Request) { + repman.withResticCluster(w, r, false, func(mycluster *cluster.Cluster, vars map[string]string) { + + taskID, err := strconv.Atoi(vars["taskID"]) + if err != nil { + http.Error(w, "Invalid taskID", 500) return } - if mycluster.ResticRepo == nil { - http.Error(w, "No restic repo", 500) + err = mycluster.ResticCancelTask(taskID) + if err != nil { + http.Error(w, "Error cancelling task :"+err.Error(), 500) return } - err := mycluster.ResticResetQueue() + w.WriteHeader(http.StatusOK) + w.Write([]byte("Task cancelled")) + }) +} + +// handlerMuxResetResticTaskQueue handles the HTTP request to reset the restic task queue for a given cluster. +// @Summary Reset Restic Task Queue +// @Description Empty the restic task queue for the specified cluster. +// @Tags ClusterRestic +// @Produce json +// @Param Authorization header string true "Insert your access token" default(Bearer ) +// @Param clusterName path string true "Cluster Name" +// @Success 200 {string} string "Task queue reset" +// @Failure 403 {string} string "No valid ACL" +// @Failure 500 {string} string "No cluster" +// @Router /api/clusters/{clusterName}/restic/task-queue/reset [get] +func (repman *ReplicationManager) handlerMuxResetResticTaskQueue(w http.ResponseWriter, r *http.Request) { + repman.withResticCluster(w, r, false, func(mycluster *cluster.Cluster, vars map[string]string) { + + err := mycluster.ResticClearQueue() if err != nil { - http.Error(w, "Error resetting task queue :"+err.Error(), 500) + http.Error(w, "Error clearing task queue :"+err.Error(), 500) return } w.WriteHeader(http.StatusOK) - w.Write([]byte("Task queue reset")) - } else { - http.Error(w, "No cluster", 500) - return - } + w.Write([]byte("Task queue cleared")) + }) } type MeetAlertMessage struct { diff --git a/server/api_database.go b/server/api_database.go index 31de33c44..0fa59c3be 100644 --- a/server/api_database.go +++ b/server/api_database.go @@ -22,6 +22,7 @@ import ( "github.com/gorilla/mux" "github.com/signal18/replication-manager/cluster" "github.com/signal18/replication-manager/config" + "github.com/signal18/replication-manager/utils/backupmgr" "github.com/signal18/replication-manager/utils/crypto" "github.com/tidwall/gjson" "github.com/tidwall/sjson" @@ -1274,7 +1275,7 @@ func (repman *ReplicationManager) handlerMuxServerPITR(w http.ResponseWriter, r } node := mycluster.GetServerFromName(vars["serverName"]) if node != nil { - var formPit config.PointInTimeMeta + var formPit backupmgr.PointInTimeMeta // This will always true for making standalone formPit.IsInPITR = true err := json.NewDecoder(r.Body).Decode(&formPit) diff --git a/server/repmanv3.go b/server/repmanv3.go index 180f13ce9..339a68091 100644 --- a/server/repmanv3.go +++ b/server/repmanv3.go @@ -523,13 +523,13 @@ func (s *ReplicationManager) SetActionForClusterSettings(ctx context.Context, in if err = user.Granted(config.GrantDBConfigFlag); err != nil { return nil, err } - mycluster.AddDBTag(in.TagValue,false) + 
mycluster.AddDBTag(in.TagValue, false) case v3.ClusterSetting_DROP_DB_TAG: if err = user.Granted(config.GrantDBConfigFlag); err != nil { return nil, err } - mycluster.DropDBTag(in.TagValue,false) + mycluster.DropDBTag(in.TagValue, false) } return res, nil @@ -858,7 +858,7 @@ func (s *ReplicationManager) GetBackups(in *v3.Cluster, stream v3.ClusterService } for _, backup := range mycluster.GetBackups() { - if err := stream.Send(&backup); err != nil { + if err := stream.Send(backup); err != nil { return err } } diff --git a/server/server.go b/server/server.go index 01b5cee99..ae47b46d1 100644 --- a/server/server.go +++ b/server/server.go @@ -420,7 +420,7 @@ func (repman *ReplicationManager) AddFlags(flags *pflag.FlagSet, conf *config.Co flags.BoolVar(&conf.LogExternalScript, "log-external-script", true, "To log external scripts output") flags.IntVar(&conf.LogExternalScriptLevel, "log-level-external-script", 3, "Log external scripts Level") - flags.IntVar(&conf.LogArchiveLevel, "log-level-archive", 2, "Log Level for backup archive (restic)") + flags.IntVar(&conf.LogResticLevel, "log-level-restic", 3, "Log Level for restic") flags.IntVar(&conf.LogMailerLevel, "log-level-mailer", 3, "Log Level for mailer") // Fetchers @@ -798,6 +798,8 @@ func (repman *ReplicationManager) AddFlags(flags *pflag.FlagSet, conf *config.Co flags.StringVar(&conf.BackupResticRepository, "backup-restic-repository", "s3:https://s3.signal18.io/backups", "Restic backend repository") flags.StringVar(&conf.BackupResticPassword, "backup-restic-password", "secret", "Restic backend password") flags.BoolVar(&conf.BackupResticAws, "backup-restic-aws", false, "Restic will archive to s3 or to datadir/backups/archive") + flags.BoolVar(&conf.BackupResticSaveQueueOnShutdown, "backup-restic-save-queue-on-shutdown", true, "Backup manager will save pending backup queue on server shutdown") + flags.BoolVar(&conf.BackupResticRunQueueOnStartup, "backup-restic-run-queue-on-startup", true, "Backup manager will process pending backup queue on server startup. 
If false, pending backup will stay in queue until manually started via API or UI") flags.BoolVar(&conf.BackupStreaming, "backup-streaming", false, "Backup streaming to cloud ") flags.BoolVar(&conf.BackupStreamingDebug, "backup-streaming-debug", false, "Debug mode for streaming to cloud ") flags.StringVar(&conf.BackupStreamingAwsAccessKeyId, "backup-streaming-aws-access-key-id", "admin", "Backup AWS key id") diff --git a/share/dashboard_react/src/Pages/Home/index.jsx b/share/dashboard_react/src/Pages/Home/index.jsx index 72dd085ea..6a1db9a6a 100644 --- a/share/dashboard_react/src/Pages/Home/index.jsx +++ b/share/dashboard_react/src/Pages/Home/index.jsx @@ -6,7 +6,7 @@ import TabItems from '../../components/TabItems' import ClusterList from '../ClusterList' import { useDispatch, useSelector } from 'react-redux' import { - getBackupSnapshot, + getResticSnapshot, getClusterAlerts, getClusterCertificates, getClusterData, @@ -24,7 +24,10 @@ import { clearCluster, getClusterApps, getOpenSVCStats, - getClusterLogs + getClusterLogs, + getResticStats, + getBackups, + getResticQueue } from '../../redux/clusterSlice' import { getClusters, getMonitoredData, getClusterPeers, getClusterForSale } from '../../redux/globalClustersSlice' import { AppSettings } from '../../AppSettings' @@ -190,8 +193,11 @@ function Home() { dispatch(getClusterCertificates({ clusterName: selectedClusterNameRef.current })) } if (dashboardTabsRef.current[selectedTabRef.current - 1] === 'Maintenance') { - dispatch(getBackupSnapshot({ clusterName: selectedClusterNameRef.current })) + dispatch(getResticSnapshot({ clusterName: selectedClusterNameRef.current })) + dispatch(getResticStats({ clusterName: selectedClusterNameRef.current })) + dispatch(getBackups({ clusterName: selectedClusterNameRef.current })) dispatch(getBackupStats({ clusterName: selectedClusterNameRef.current })) + dispatch(getResticQueue({ clusterName: selectedClusterNameRef.current })) dispatch(getJobs({ clusterName: selectedClusterNameRef.current })) } if (dashboardTabsRef.current[selectedTabRef.current - 1] === 'Tops') { diff --git a/share/dashboard_react/src/Pages/Maintenance/index.jsx b/share/dashboard_react/src/Pages/Maintenance/index.jsx index 51e9f3c8b..a537a14de 100644 --- a/share/dashboard_react/src/Pages/Maintenance/index.jsx +++ b/share/dashboard_react/src/Pages/Maintenance/index.jsx @@ -11,10 +11,125 @@ import BackupSettings from '../Settings/BackupSettings' import SchedulerSettings from '../Settings/SchedulerSettings' import { TaskLogs } from '../Dashboard/components/Logs' import DatabaseJobs from './DatabaseJobs' +import { purgeResticSnapshot, resticQueueCancel, resticQueueMove, resticQueuePause, resticQueueResume } from '../../redux/clusterSlice' +import RMIconButton from '../../components/RMIconButton' +import ConfirmModal from '../../components/Modals/ConfirmModal' +import { HiTrash } from 'react-icons/hi' +import { showWarningToast } from '../../redux/toastSlice' + +const QueueMoveForm = React.memo(({ list = [], currentId, onChange = (dir, afterId) => { } }) => { + const [direction, setDirection] = useState('first'); + + const handleDirectionChange = (e) => { + setDirection(e.target.value); + if (e.target.value !== 'after') { + onChange(e.target.value, null); + } + }; + + const handleAfterChange = (e) => { + onChange('after', e.target.value); + }; + return ( + + + Move {direction}: + + + {direction === 'after' && ( + + Move After: + + + )} + + ) +}) + +const formConfig = { + queueMove: { + component: QueueMoveForm, + getProps: (ctx) => ({ + list: 
ctx.queueData, + currentId: ctx.payload.data.taskId, + onChange: ctx.handleMove + }) + } +}; + +const DynamicForm = (ctx) => { + const { action } = ctx.payload; + + const entry = formConfig[action]; + if (!entry) return null; + + const Component = entry.component; + const props = entry.getProps(ctx); + + return ; +}; + +const resticTaskType = (rtt) => { + switch (rtt) { + case 0: + return "init" + case 1: + return "fetch" + case 2: + return "backup" + case 3: + return "purge" + case 4: + return "unlock" + case 5: + return "changepass" + default: + return "Unknown" + } +} + +const resticTaskDetail = (row) => { + switch (row.task_type) { + case 2: + return ( + + Path: + {row.dir_path} + + + Tags: + {row.tags?.join(', ')} + + ) + case 3: + return ( + + Options: + {JSON.stringify(row.opt)} + + ) + default: + return (
-
) + } +} + function Maintenance({ selectedCluster, user }) { const [data, setData] = useState([]) const [snapshotData, setSnapshotData] = useState([]) + const [queueData, setQueueData] = useState([]) + const [confirmState, setConfirmState] = useState({ isOpen: false, title: '', payload: null }) + const { isOpen: isConfirmModalOpen, title, payload } = confirmState + + const dispatch = useDispatch() const columnHelper = createColumnHelper() const { isOpen: isBackupSettingsOpen, onToggle: onBackupSettingsToggle } = useDisclosure({ defaultIsOpen: JSON.parse(localStorage.getItem('isBackupSettingsOpen')) || false @@ -35,9 +150,61 @@ function Maintenance({ selectedCluster, user }) { defaultIsOpen: JSON.parse(localStorage.getItem('isLogsInBackupOpen')) || false }) - const { - cluster: { backups : { snapshots , stats} } - } = useSelector((state) => state) + const list = useSelector((state) => state.cluster.backups.list) + const backupStats = useSelector((state) => state.cluster.backups.stats) + + const snapshots = useSelector((state) => state.cluster.restic.snapshots) + const stats = useSelector((state) => state.cluster.restic.stats) + const resticQueue = useSelector((state) => state.cluster.restic.queue) + + const openConfirmModal = (title, payload) => { + setConfirmState({ isOpen: true, title, payload }) + } + + const closeConfirmModal = () => { + setConfirmState({ isOpen: false, title: '', payload: null }) + } + + const handleConfirm = () => { + if (payload && payload.action) { + switch (payload.action) { + case 'snapshotPurge': + dispatch(purgeResticSnapshot({ clusterName: selectedCluster.name, snapshotId: payload.data.snapshotId })) + break + case 'queueCancel': + dispatch(resticQueueCancel({ clusterName: selectedCluster.name, taskId: payload.data.taskId })) + break + case 'queueMove': + dispatch(resticQueueMove({ clusterName: selectedCluster.name, taskId: payload.data.taskId, direction: payload.data.direction, afterId: payload.data.afterId })) + break + case 'queuePause': + dispatch(resticQueuePause({ clusterName: selectedCluster.name })); + break + case 'queueResume': + dispatch(resticQueueResume({ clusterName: selectedCluster.name })); + break + default: + dispatch(showWarningToast({ title: 'Unknown action', description: `The action ${payload.action} is not recognized.` })) + break + } + } + + closeConfirmModal() + } + + const handleMove = (direction, afterId) => { + setConfirmState((prevState) => ({ + ...prevState, + payload: { + ...prevState.payload, + data: { + ...prevState.payload.data, + direction, + afterId + } + } + })) + } useEffect(() => { localStorage.setItem('isBackupSettingsOpen', JSON.stringify(isBackupSettingsOpen)) @@ -60,11 +227,13 @@ function Maintenance({ selectedCluster, user }) { }, [isBackupsOpen]) useEffect(() => { - if (selectedCluster?.backupList) { - const arrData = convertObjectToArray(selectedCluster.backupList) + if (list) { + const arrData = convertObjectToArray(list) setData(arrData.reverse()) + } else { + setData([]) } - }, [selectedCluster?.backupList]) + }, [selectedCluster?.name, list]) useEffect(() => { if (snapshots?.length > 0) { @@ -72,7 +241,31 @@ function Maintenance({ selectedCluster, user }) { } else { setSnapshotData([]) } - }, [selectedCluster?.name,snapshots]) + }, [selectedCluster?.name, snapshots]) + + useEffect(() => { + if (resticQueue?.length > 0) { + const arrData = convertObjectToArray(resticQueue) + setQueueData(arrData.reverse()) + } else { + setQueueData([]) + } + }, [selectedCluster?.name, resticQueue]) + + const backupDataStats = [ + 
{ + key: 'Total Size', + value: backupStats?.total_size + }, + { + key: 'Total File Count', + value: backupStats?.total_file_count + }, + { + key: 'Total Blob Count', + value: backupStats?.total_blob_count + } + ] const columns = useMemo( () => [ @@ -179,8 +372,7 @@ function Maintenance({ selectedCluster, user }) { header: 'Completed', id: 'completed' }) - ], - [] + ] ) const snapshotDataStats = [ @@ -214,8 +406,38 @@ function Maintenance({ selectedCluster, user }) { }), columnHelper.accessor((row) => row.tags?.join(','), { header: 'Tags' + }), + // Added Purge action column + columnHelper.accessor((row) => ( + openConfirmModal('Purge Snapshot', { action: 'snapshotPurge', data: { snapshotId: row.id } })} /> + ), { + cell: (info) => info.getValue(), + header: 'Actions', + id: 'actions', + }) + ]) + + const queueColumns = useMemo(() => [ + columnHelper.accessor((row) => row.task_id, { + header: 'ID', + id: 'task_id' + }), + columnHelper.accessor((row) => resticTaskType(row.task_type), { + header: 'Task Type' + }), + columnHelper.accessor((row) => resticTaskDetail(row), { + header: 'Details' + }), + // Added Purge action column + columnHelper.accessor((row) => ( + openConfirmModal('Cancel Queued Task', { action: 'queueCancel', data: { taskId: row.task_id } })} /> + ), { + cell: (info) => info.getValue(), + header: 'Actions', + id: 'actions', }) ]) + return ( } + body={ + + + + + } /> + } /> @@ -277,6 +505,12 @@ function Maintenance({ selectedCluster, user }) { heading={'Job Logs'} body={} /> + {isConfirmModalOpen && } onConfirmClick={handleConfirm} closeModal={closeConfirmModal} />} ) } diff --git a/share/dashboard_react/src/Pages/Settings/LogsSettings.jsx b/share/dashboard_react/src/Pages/Settings/LogsSettings.jsx index 042f94654..a1d068af4 100644 --- a/share/dashboard_react/src/Pages/Settings/LogsSettings.jsx +++ b/share/dashboard_react/src/Pages/Settings/LogsSettings.jsx @@ -271,16 +271,16 @@ function LogsSettings({ selectedCluster, user, openConfirmModal }) { ) }, { - key: 'Log Backup Archive Level', + key: 'Log Restic', value: ( dispatch( setSetting({ clusterName: selectedCluster?.name, - setting: 'log-level-archive', + setting: 'log-level-restic', value: val }) ) diff --git a/share/dashboard_react/src/redux/clusterSlice.js b/share/dashboard_react/src/redux/clusterSlice.js index dfa3836fd..7334337a8 100644 --- a/share/dashboard_react/src/redux/clusterSlice.js +++ b/share/dashboard_react/src/redux/clusterSlice.js @@ -1,7 +1,7 @@ import { createSlice, createAsyncThunk, isAnyOf } from '@reduxjs/toolkit' import { clusterService } from '../services/clusterService' import { handleError, showErrorBanner, showSuccessBanner } from '../utility/common' -import { isEqual } from 'lodash'; +import { get, isEqual } from 'lodash'; export const getClusterData = createAsyncThunk('cluster/getClusterData', async ({ clusterName }, thunkAPI) => { try { @@ -160,15 +160,23 @@ export const getOpenSVCStats = createAsyncThunk('cluster/getOpenSVCStats', async } }); -export const getBackupSnapshot = createAsyncThunk('cluster/getBackupSnapshot', async ({ clusterName }, thunkAPI) => { +export const getBackups = createAsyncThunk('cluster/getBackups', async ({ clusterName }, thunkAPI) => { try { const baseURL = thunkAPI.getState()?.auth?.baseURL || '' - const { data, status } = await clusterService.getBackupSnapshot(clusterName, baseURL) + const { data, status } = await clusterService.getBackups(clusterName, baseURL) return { data, status } } catch (error) { handleError(error, thunkAPI) } -}) +}, { + condition: (_, { 
getState }) => { + const { cluster } = getState(); + if (cluster.isFetching.backups.list) { + return false; + } + } + } +) export const getBackupStats = createAsyncThunk('cluster/getBackupStats', async ({ clusterName }, thunkAPI) => { try { @@ -178,6 +186,124 @@ export const getBackupStats = createAsyncThunk('cluster/getBackupStats', async ( } catch (error) { handleError(error, thunkAPI) } +}, { + condition: (_, { getState }) => { + const { cluster } = getState(); + if (cluster.isFetching.backups.stats) { + return false; + } + } +}) + +export const getResticSnapshot = createAsyncThunk('cluster/getResticSnapshot', async ({ clusterName }, thunkAPI) => { + try { + const baseURL = thunkAPI.getState()?.auth?.baseURL || '' + const { data, status } = await clusterService.getResticSnapshot(clusterName, baseURL) + return { data, status } + } catch (error) { + handleError(error, thunkAPI) + } +},{ + condition: (_, { getState }) => { + const { cluster } = getState(); + if (cluster.isFetching.restic.snapshots) { + return false; + } + } +}) + +export const getResticStats = createAsyncThunk('cluster/getResticStats', async ({ clusterName }, thunkAPI) => { + try { + const baseURL = thunkAPI.getState()?.auth?.baseURL || '' + const { data, status } = await clusterService.getResticStats(clusterName, baseURL) + return { data, status } + } catch (error) { + handleError(error, thunkAPI) + } +}, { + condition: (_, { getState }) => { + const { cluster } = getState(); + if (cluster.isFetching.restic.stats) { + return false; + } + } +}) + +export const purgeResticSnapshot = createAsyncThunk('cluster/purgeResticSnapshot', async ({ clusterName, snapshotId }, thunkAPI) => { + try { + const baseURL = thunkAPI.getState()?.auth?.baseURL || '' + const { data, status } = await clusterService.purgeResticSnapshot(clusterName, snapshotId, baseURL) + return { data, status } + } catch (error) { + handleError(error, thunkAPI) + } +}) + +export const purgeResticByPolicy = createAsyncThunk('cluster/purgeResticByPolicy', async ({ clusterName }, thunkAPI) => { + try { + const baseURL = thunkAPI.getState()?.auth?.baseURL || '' + const { data, status } = await clusterService.purgeResticByPolicy(clusterName, baseURL) + return { data, status } + } catch (error) { + handleError(error, thunkAPI) + } +}) + +export const getResticQueue = createAsyncThunk('cluster/getResticQueue', async ({ clusterName }, thunkAPI) => { + try { + const baseURL = thunkAPI.getState()?.auth?.baseURL || '' + const { data, status } = await clusterService.getResticQueue(clusterName, baseURL) + return { data, status } + } catch (error) { + handleError(error, thunkAPI) + } +}, { + condition: (_, { getState }) => { + const { cluster } = getState(); + if (cluster.isFetching.restic.queue) { + return false; + } + } +}) + +export const resticQueueCancel = createAsyncThunk('cluster/resticQueueCancel', async ({ clusterName, taskId }, thunkAPI) => { + try { + const baseURL = thunkAPI.getState()?.auth?.baseURL || '' + const { data, status } = await clusterService.resticQueueCancel(clusterName, taskId, baseURL) + return { data, status } + } catch (error) { + handleError(error, thunkAPI) + } +}) + +export const resticQueueMove = createAsyncThunk('cluster/resticQueueMove', async ({ clusterName, taskId, direction, afterId }, thunkAPI) => { + try { + const baseURL = thunkAPI.getState()?.auth?.baseURL || '' + const { data, status } = await clusterService.resticQueueMove(clusterName, taskId, direction, afterId, baseURL) + return { data, status } + } catch (error) { + 
handleError(error, thunkAPI) + } +}) + +export const resticQueuePause = createAsyncThunk('cluster/resticQueuePause', async ({ clusterName }, thunkAPI) => { + try { + const baseURL = thunkAPI.getState()?.auth?.baseURL || '' + const { data, status } = await clusterService.resticQueuePause(clusterName, baseURL) + return { data, status } + } catch (error) { + handleError(error, thunkAPI) + } +}) + +export const resticQueueResume = createAsyncThunk('cluster/resticQueueResume', async ({ clusterName }, thunkAPI) => { + try { + const baseURL = thunkAPI.getState()?.auth?.baseURL || '' + const { data, status } = await clusterService.resticQueueResume(clusterName, baseURL) + return { data, status } + } catch (error) { + handleError(error, thunkAPI) + } }) export const getJobs = createAsyncThunk('cluster/getJobs', async ({ clusterName }, thunkAPI) => { @@ -1799,7 +1925,16 @@ const initialState = { serviceOpensvc: false, metadataLocks: false, responsetime: false, - } + }, + backups: { + list: false, + stats: false + }, + restic: { + snapshots: false, + stats: false, + queue: false + }, }, error: null, clusterApps: null, @@ -1817,9 +1952,14 @@ const initialState = { clusterCertificates: null, clusterStates: null, backups: { - snapshots: null, + list: null, stats: null }, + restic: { + snapshots: null, + stats: null, + queue: null + }, topProcess: null, opensvcStats: null, jobs: null, @@ -1892,10 +2032,13 @@ export const clusterSlice = createSlice({ getDatabaseVariables.fulfilled, getTopProcess.fulfilled, getOpenSVCStats.fulfilled, - getBackupSnapshot.fulfilled, - getBackupStats.fulfilled, getShardSchema.fulfilled, getQueryRules.fulfilled, + getBackups.fulfilled, + getBackupStats.fulfilled, + getResticSnapshot.fulfilled, + getResticStats.fulfilled, + getResticQueue.fulfilled, getJobs.fulfilled ), (state, action) => { @@ -1933,10 +2076,21 @@ export const clusterSlice = createSlice({ } else if (action.type.includes('getOpenSVCStats')) { state.opensvcStats = action.payload.data state.isFetching.opensvcStats = false - } else if (action.type.includes('getBackupSnapshot')) { - state.backups.snapshots = action.payload.data + } else if (action.type.includes('getBackups')) { + state.backups.list = action.payload.data + state.isFetching.backups.list = false } else if (action.type.includes('getBackupStats')) { state.backups.stats = action.payload.data + state.isFetching.backups.stats = false + } else if (action.type.includes('getResticSnapshot')) { + state.restic.snapshots = action.payload.data + state.isFetching.restic.snapshots = false + } else if (action.type.includes('getResticStats')) { + state.restic.stats = action.payload.data + state.isFetching.restic.stats = false + } else if (action.type.includes('getResticQueue')) { + state.restic.queue = action.payload.data + state.isFetching.restic.queue = false } else if (action.type.includes('getShardSchema')) { state.shardSchema = action.payload.data } else if (action.type.includes('getQueryRules')) { @@ -1959,7 +2113,7 @@ export const clusterSlice = createSlice({ state.isFetching.database.sqlerrors = false } else if (serviceName === 'auditlog') { state.database.auditlogs = action.payload.data - state.isFetching.database.auditlogs = false + state.isFetching.database.auditlogs = false } else if (serviceName === 'digest-statements-pfs') { state.database.digestQueries = action.payload.data state.isFetching.database.digestQueries = false @@ -2004,6 +2158,11 @@ export const clusterSlice = createSlice({ getTopProcess.pending, getOpenSVCStats.pending, 
getDatabaseService.pending, + getResticStats.pending, + getResticSnapshot.pending, + getResticQueue.pending, + getBackups.pending, + getBackupStats.pending, ), (state, action) => { if (action.type.includes('getClusterData')) { @@ -2024,6 +2183,16 @@ export const clusterSlice = createSlice({ state.isFetching.top = true } else if (action.type.includes('getOpenSVCStats')) { state.isFetching.opensvcStats = true + } else if (action.type.includes('getBackups')) { + state.isFetching.backups.list = true + } else if (action.type.includes('getBackupStats')) { + state.isFetching.backups.stats = true + } else if (action.type.includes('getResticSnapshot')) { + state.isFetching.restic.snapshots = true + } else if (action.type.includes('getResticStats')) { + state.isFetching.restic.stats = true + } else if (action.type.includes('getResticQueue')) { + state.isFetching.restic.queue = true } else if (action.type.includes('getDatabaseService')) { const { serviceName } = action.meta.arg if (serviceName === 'processlist') { @@ -2068,6 +2237,13 @@ export const clusterSlice = createSlice({ getClusterProxies.rejected, getClusterCertificates.rejected, getDatabaseService.rejected, + getBackups.rejected, + getBackupStats.rejected, + getResticSnapshot.rejected, + getResticStats.rejected, + getResticQueue.rejected, + getTopProcess.rejected, + getOpenSVCStats.rejected ), (state, action) => { if (action.type.includes('getClusterData')) { state.isFetching.cluster = false @@ -2083,6 +2259,16 @@ export const clusterSlice = createSlice({ state.isFetching.proxies = false } else if (action.type.includes('getClusterApps')) { state.isFetching.apps = false + } else if (action.type.includes('getBackups')) { + state.isFetching.backups.list = false + } else if (action.type.includes('getBackupStats')) { + state.isFetching.backups.stats = false + } else if (action.type.includes('getResticSnapshot')) { + state.isFetching.restic.snapshots = false + } else if (action.type.includes('getResticStats')) { + state.isFetching.restic.stats = false + } else if (action.type.includes('getResticQueue')) { + state.isFetching.restic.queue = false } else if (action.type.includes('getTopProcess')) { state.isFetching.top = false } else if (action.type.includes('getOpenSVCStats')) { diff --git a/share/dashboard_react/src/services/clusterService.js b/share/dashboard_react/src/services/clusterService.js index 434c78825..5465df838 100644 --- a/share/dashboard_react/src/services/clusterService.js +++ b/share/dashboard_react/src/services/clusterService.js @@ -11,11 +11,23 @@ export const clusterService = { getClusterCertificates, getTopProcess, getOpenSVCStats, - getBackupSnapshot, + getBackups, getBackupStats, getJobs, getShardSchema, getQueryRules, + + // Restic management APIs + getResticSnapshot, + getResticStats, + purgeResticSnapshot, + purgeResticByPolicy, + getResticQueue, + resticQueueResume, + resticQueuePause, + resticQueueMove, + resticQueueCancel, + resticQueueReset, // Cluster management APIs checksumAllTables, @@ -181,7 +193,7 @@ function getOpenSVCStats(clusterName, baseURL) { return getApi(baseURL).get(`clusters/${clusterName}/opensvc-stats`) } -function getBackupSnapshot(clusterName, baseURL) { +function getBackups(clusterName, baseURL) { return getApi(baseURL).get(`clusters/${clusterName}/backups`) } @@ -189,6 +201,15 @@ function getBackupStats(clusterName, baseURL) { return getApi(baseURL).get(`clusters/${clusterName}/backups/stats`) } + +function getResticSnapshot(clusterName, baseURL) { + return
getApi(baseURL).get(`clusters/${clusterName}/restic/snapshots`) +} + +function getResticStats(clusterName, baseURL) { + return getApi(baseURL).get(`clusters/${clusterName}/restic/stats`) +} + function getJobs(clusterName, baseURL) { return getApi(baseURL).get(`clusters/${clusterName}/jobs`) } @@ -662,3 +683,43 @@ function storageFieldIndexDrop(clusterName, appId, field, index, baseURL) { function connectDockerRegistry(clusterName, dockerRegistry = {}, baseURL) { return getApi(baseURL).post(`clusters/${clusterName}/actions/docker/actions/registry-connect`, { ...dockerRegistry }) } + + +// Restic functions +function purgeResticSnapshot(clusterName, snapshotId, baseURL) { + return getApi(baseURL).get(`clusters/${clusterName}/restic/purge/${snapshotId}`) +} + +function purgeResticByPolicy(clusterName, baseURL) { + return getApi(baseURL).get(`clusters/${clusterName}/restic/purge/policy`) +} + +function getResticQueue(clusterName, baseURL) { + return getApi(baseURL).get(`clusters/${clusterName}/restic/task-queue`) +} + +function resticQueueResume(clusterName, baseURL) { + return getApi(baseURL).get(`clusters/${clusterName}/restic/task-queue/resume`) +} + +function resticQueuePause(clusterName, baseURL) { + return getApi(baseURL).get(`clusters/${clusterName}/restic/task-queue/pause`) +} + +// Parameter order matches the resticQueueMove thunk call (clusterName, taskId, direction, afterId) +function resticQueueMove(clusterName, taskId, direction, afterId, baseURL) { + if (direction === "after") { + return getApi(baseURL).get(`clusters/${clusterName}/restic/task-queue/move/${direction}/${taskId}/${afterId}`) + } else { + return getApi(baseURL).get(`clusters/${clusterName}/restic/task-queue/move/${direction}/${taskId}`) + } +} + +function resticQueueCancel(clusterName, taskID, baseURL) { + return getApi(baseURL).get(`clusters/${clusterName}/restic/task-queue/cancel/${taskID}`) +} + +function resticQueueReset(clusterName, baseURL) { + return getApi(baseURL).get(`clusters/${clusterName}/restic/task-queue/reset`) +} + +// Utility functions \ No newline at end of file diff --git a/share/dashboard_react/vite.config.js b/share/dashboard_react/vite.config.js index 9ded9f5a9..3bf5ecde0 100644 --- a/share/dashboard_react/vite.config.js +++ b/share/dashboard_react/vite.config.js @@ -9,7 +9,11 @@ export default defineConfig({ https: true, proxy: { '/api': { - target: 'https://repman.marie-dev.svc.cloud18:10005/', + target: 'https://172.18.0.10:10005/', + secure: false + }, + '/graphite': { + target: 'https://172.18.0.10:10005/', secure: false } } diff --git a/utils/archiver/archiver.go b/utils/archiver/archiver.go deleted file mode 100644 index cef54ae01..000000000 --- a/utils/archiver/archiver.go +++ /dev/null @@ -1,26 +0,0 @@ -package archiver - -import "google.golang.org/protobuf/runtime/protoimpl" - -type BackupStat struct { - TotalSize int64 `protobuf:"varint,1,opt,name=total_size,proto3" json:"total_size"` - TotalFileCount int64 `protobuf:"varint,2,opt,name=total_file_count,proto3" json:"total_file_count"` - TotalBlobCount int64 `protobuf:"varint,3,opt,name=total_blob_count,proto3" json:"total_blob_count"` -} - -type Backup struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - ShortId string `protobuf:"bytes,2,opt,name=short_id,proto3" json:"short_id,omitempty"` - Time string `protobuf:"bytes,3,opt,name=time,proto3" json:"time,omitempty"` - Tree string `protobuf:"bytes,4,opt,name=tree,proto3" json:"tree,omitempty"` - Paths []string `protobuf:"bytes,5,rep,name=paths,proto3"
json:"paths,omitempty"` - Hostname string `protobuf:"bytes,6,opt,name=hostname,proto3" json:"hostname,omitempty"` - Username string `protobuf:"bytes,7,opt,name=username,proto3" json:"username,omitempty"` - Uid int64 `protobuf:"varint,8,opt,name=uid,proto3" json:"uid,omitempty"` - Gid int64 `protobuf:"varint,9,opt,name=gid,proto3" json:"gid,omitempty"` - Tags []string `protobuf:"bytes,10,rep,name=tags,proto3" json:"tags,omitempty"` -} diff --git a/utils/backupmgr/backup.go b/utils/backupmgr/backup.go new file mode 100644 index 000000000..41ec7c8d5 --- /dev/null +++ b/utils/backupmgr/backup.go @@ -0,0 +1,218 @@ +package backupmgr + +import ( + "os" + "path/filepath" + "sync" + "time" + + "google.golang.org/protobuf/runtime/protoimpl" +) + +type BackupMethod int + +const ( + BackupMethodLogical = 1 + BackupMethodPhysical = 2 +) + +type BackupStrategy int + +const ( + BackupStrategyFull = 1 + BackupStrategyIncremental = 2 + BackupStrategyDifferential = 3 +) + +type BackupMetadata struct { + Id int64 `json:"id"` + StartTime time.Time `json:"startTime"` + EndTime time.Time `json:"endTime"` + BackupMethod BackupMethod `json:"backupMethod"` + BackupTool string `json:"backupTool"` + BackupToolVersion string `json:"backupToolVersion"` + BackupStrategy BackupStrategy `json:"backupStrategy"` + Source string `json:"source"` + Dest string `json:"dest"` + Size int64 `json:"size"` + FileCount int64 `json:"fileCount"` + Compressed bool `json:"compressed"` + Encrypted bool `json:"encrypted"` + EncryptionAlgo string `json:"encryptionAlgo"` + EncryptionKey string `json:"encryptionKey"` + Checksum string `json:"checksum"` + RetentionDays int `json:"retentionDays"` + BinLogFileName string `json:"binLogFileName"` + BinLogFilePos uint64 `json:"binLogFilePos"` + BinLogGtid string `json:"binLogUuid"` + Completed bool `json:"completed"` + SplitUser bool `json:"splitUser"` + Previous int64 `json:"previous"` +} + +type PointInTimeMeta struct { + IsInPITR bool + UseBinlog bool + Backup int64 + RestoreTime int64 +} + +func (bm *BackupMetadata) GetSizeAndFileCount() error { + var size int64 = 0 + var fileCount int64 = 0 + err := filepath.Walk(bm.Dest, func(_ string, info os.FileInfo, err error) error { + if err == nil && !info.IsDir() { + size += info.Size() + fileCount++ + } + return err + }) + bm.Size = size + bm.FileCount = fileCount + return err +} + +type ReadBinaryLogsBoundary struct { + UseTimestamp bool + Filename string + Position int64 + Timestamp time.Time +} + +type ReadBinaryLogsRange struct { + Start ReadBinaryLogsBoundary + End ReadBinaryLogsBoundary +} + +type BackupStat struct { + TotalSize int64 `protobuf:"varint,1,opt,name=total_size,proto3" json:"total_size"` + TotalFileCount int64 `protobuf:"varint,2,opt,name=total_file_count,proto3" json:"total_file_count"` + TotalBlobCount int64 `protobuf:"varint,3,opt,name=total_blob_count,proto3" json:"total_blob_count"` +} + +type BackupSnapshot struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ShortId string `protobuf:"bytes,2,opt,name=short_id,proto3" json:"short_id,omitempty"` + Time string `protobuf:"bytes,3,opt,name=time,proto3" json:"time,omitempty"` + Tree string `protobuf:"bytes,4,opt,name=tree,proto3" json:"tree,omitempty"` + Paths []string `protobuf:"bytes,5,rep,name=paths,proto3" json:"paths,omitempty"` + Hostname string `protobuf:"bytes,6,opt,name=hostname,proto3" json:"hostname,omitempty"` + Username string 
`protobuf:"bytes,7,opt,name=username,proto3" json:"username,omitempty"` + Uid int64 `protobuf:"varint,8,opt,name=uid,proto3" json:"uid,omitempty"` + Gid int64 `protobuf:"varint,9,opt,name=gid,proto3" json:"gid,omitempty"` + Tags []string `protobuf:"bytes,10,rep,name=tags,proto3" json:"tags,omitempty"` +} + +type BackupMetaMap struct { + *sync.Map +} + +func NewBackupMetaMap() *BackupMetaMap { + s := new(sync.Map) + m := &BackupMetaMap{Map: s} + return m +} + +func (m *BackupMetaMap) Get(key int64) *BackupMetadata { + if v, ok := m.Load(key); ok { + return v.(*BackupMetadata) + } + return nil +} + +func (m *BackupMetaMap) CheckAndGet(key int64) (*BackupMetadata, bool) { + v, ok := m.Load(key) + if ok { + return v.(*BackupMetadata), true + } + return nil, false +} + +func (m *BackupMetaMap) Set(key int64, value *BackupMetadata) { + m.Store(key, value) +} + +func (m *BackupMetaMap) ToNormalMap(c map[int64]*BackupMetadata) { + // Clear the old values in the output map + for k := range c { + delete(c, k) + } + + // Insert all values from the BackupMetaMap to the output map + m.Callback(func(key int64, value *BackupMetadata) bool { + c[key] = value + return true + }) +} + +func (m *BackupMetaMap) ToNewMap() map[int64]*BackupMetadata { + result := make(map[int64]*BackupMetadata) + m.Range(func(k, v any) bool { + result[k.(int64)] = v.(*BackupMetadata) + return true + }) + return result +} + +func (m *BackupMetaMap) Callback(f func(key int64, value *BackupMetadata) bool) { + m.Range(func(k, v any) bool { + return f(k.(int64), v.(*BackupMetadata)) + }) +} + +func (m *BackupMetaMap) Clear() { + m.Range(func(key, value any) bool { + m.Delete(key.(int64)) + return true + }) +} + +func FromNormalBackupMetaMap(m *BackupMetaMap, c map[int64]*BackupMetadata) *BackupMetaMap { + if m == nil { + m = NewBackupMetaMap() + } else { + m.Clear() + } + + for k, v := range c { + m.Set(k, v) + } + + return m +} + +func FromBackupMetaMap(m *BackupMetaMap, c *BackupMetaMap) *BackupMetaMap { + if m == nil { + m = NewBackupMetaMap() + } else { + m.Clear() + } + + if c != nil { + c.Callback(func(key int64, value *BackupMetadata) bool { + m.Set(key, value) + return true + }) + } + + return m +} + +// GetBackupsByToolAndSource retrieves backups with the same backupTool and source. 
+func (b *BackupMetaMap) GetPreviousBackup(backupTool string, source string) *BackupMetadata { + var result *BackupMetadata + b.Map.Range(func(key, value interface{}) bool { + if backup, ok := value.(*BackupMetadata); ok { + if backup.BackupTool == backupTool && backup.Source == source { + result = backup + return false + } + } + return true + }) + return result +} diff --git a/utils/archiver/restic.go b/utils/backupmgr/restic.go similarity index 59% rename from utils/archiver/restic.go rename to utils/backupmgr/restic.go index a9920b285..6bd6bde9e 100644 --- a/utils/archiver/restic.go +++ b/utils/backupmgr/restic.go @@ -1,9 +1,10 @@ -package archiver +package backupmgr import ( "bufio" "bytes" "encoding/json" + "errors" "fmt" "io" "os" @@ -13,7 +14,7 @@ import ( "sync" "time" - "github.com/signal18/replication-manager/utils/state" + "github.com/signal18/replication-manager/utils/s18log" "github.com/sirupsen/logrus" ) @@ -21,23 +22,35 @@ import ( type TaskType int const ( - FetchTask TaskType = iota - PurgeTask + InitTask TaskType = iota + FetchTask BackupTask + PurgeTask UnlockTask + ChangePassTask +) + +type MoveType string + +const ( + MoveFirst MoveType = "first" + MoveAfter MoveType = "after" + MoveLast MoveType = "last" ) func GetTaskName(TaskType TaskType) string { switch TaskType { case 0: - return "fetch" + return "init" case 1: - return "backup" + return "fetch" case 2: - return "purge" + return "backup" case 3: - return "unlock" + return "purge" case 4: + return "unlock" + case 5: return "changepass" default: return "Unknown" @@ -47,13 +60,11 @@ func GetTaskName(TaskType TaskType) string { // Task represents a queue task type ResticTask struct { ID int `json:"task_id"` + Type TaskType `json:"task_type"` DirPath string `json:"dir_path"` - NewPassFile string `json:"new_pass_file"` Tags []string `json:"tags"` - Type TaskType `json:"task_type"` Opt ResticPurgeOption `json:"opt"` - ErrorState state.State `json:"error_state"` - Result chan ResticResult `json:"-"` // Only used if caller needs the result + NewPassFile string `json:"-"` } // ResticResult holds the output or error of a task @@ -65,18 +76,19 @@ type ResticResult struct { // ResticPurgeOption holds the configuration for purge type ResticPurgeOption struct { - KeepLast int - KeepHourly int - KeepDaily int - KeepWeekly int - KeepMonthly int - KeepYearly int - KeepWithin string - KeepWithinHourly string - KeepWithinDaily string - KeepWithinWeekly string - KeepWithinMonthly string - KeepWithinYearly string + SnapshotID string `json:"snapshot_id,omitempty"` + KeepLast int `json:"keep_last,omitempty"` + KeepHourly int `json:"keep_hourly,omitempty"` + KeepDaily int `json:"keep_daily,omitempty"` + KeepWeekly int `json:"keep_weekly,omitempty"` + KeepMonthly int `json:"keep_monthly,omitempty"` + KeepYearly int `json:"keep_yearly,omitempty"` + KeepWithin string `json:"keep_within,omitempty"` + KeepWithinHourly string `json:"keep_within_hourly,omitempty"` + KeepWithinDaily string `json:"keep_within_daily,omitempty"` + KeepWithinWeekly string `json:"keep_within_weekly,omitempty"` + KeepWithinMonthly string `json:"keep_within_monthly,omitempty"` + KeepWithinYearly string `json:"keep_within_yearly,omitempty"` } // TaskStatus represents the task state information stored in the JSON flag file @@ -87,54 +99,126 @@ type TaskStatus struct { Completion string `json:"completion_time,omitempty"` // Only present if completed } -// ResticRepo manages the queue and execution -type ResticRepo struct { +// ResticManager manages the queue and execution 
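The `BackupMetaMap` introduced above is a typed wrapper around `sync.Map`, keyed by job ID, so cluster code can record and look up backup metadata without external locking. A minimal usage sketch (the IDs and field values are illustrative, not taken from the patch):

```go
package main

import (
	"fmt"
	"time"

	"github.com/signal18/replication-manager/utils/backupmgr"
)

func main() {
	metas := backupmgr.NewBackupMetaMap()

	// Set/Get are safe for concurrent use; no extra mutex is needed.
	metas.Set(1001, &backupmgr.BackupMetadata{
		Id:         1001,
		StartTime:  time.Now(),
		BackupTool: "mariabackup",
		Source:     "db1:3306",
		Dest:       "/var/backups/db1/1001",
	})

	if m := metas.Get(1001); m != nil {
		fmt.Println("found backup taken with", m.BackupTool)
	}

	// Note: sync.Map iteration order is unspecified, so GetPreviousBackup
	// returns *some* earlier backup for the tool/source pair, not
	// necessarily the most recent one.
	if prev := metas.GetPreviousBackup("mariabackup", "db1:3306"); prev != nil {
		fmt.Println("previous backup id:", prev.Id)
	}
}
```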
diff --git a/utils/archiver/restic.go b/utils/backupmgr/restic.go
similarity index 59%
rename from utils/archiver/restic.go
rename to utils/backupmgr/restic.go
index a9920b285..6bd6bde9e 100644
--- a/utils/archiver/restic.go
+++ b/utils/backupmgr/restic.go
@@ -1,9 +1,10 @@
-package archiver
+package backupmgr
 
 import (
 	"bufio"
 	"bytes"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"os"
@@ -13,7 +14,7 @@ import (
 	"sync"
 	"time"
 
-	"github.com/signal18/replication-manager/utils/state"
+	"github.com/signal18/replication-manager/utils/s18log"
 	"github.com/sirupsen/logrus"
 )
 
@@ -21,23 +22,35 @@
 type TaskType int
 
 const (
-	FetchTask TaskType = iota
-	PurgeTask
+	InitTask TaskType = iota
+	FetchTask
 	BackupTask
+	PurgeTask
 	UnlockTask
+	ChangePassTask
+)
+
+type MoveType string
+
+const (
+	MoveFirst MoveType = "first"
+	MoveAfter MoveType = "after"
+	MoveLast  MoveType = "last"
 )
 
 func GetTaskName(TaskType TaskType) string {
 	switch TaskType {
 	case 0:
-		return "fetch"
+		return "init"
 	case 1:
-		return "backup"
+		return "fetch"
 	case 2:
-		return "purge"
+		return "backup"
 	case 3:
-		return "unlock"
+		return "purge"
 	case 4:
+		return "unlock"
+	case 5:
 		return "changepass"
 	default:
 		return "Unknown"
@@ -47,13 +60,11 @@ func GetTaskName(TaskType TaskType) string {
 // Task represents a queue task
 type ResticTask struct {
 	ID          int               `json:"task_id"`
+	Type        TaskType          `json:"task_type"`
 	DirPath     string            `json:"dir_path"`
-	NewPassFile string            `json:"new_pass_file"`
 	Tags        []string          `json:"tags"`
-	Type        TaskType          `json:"task_type"`
 	Opt         ResticPurgeOption `json:"opt"`
-	ErrorState  state.State       `json:"error_state"`
-	Result      chan ResticResult `json:"-"` // Only used if caller needs the result
+	NewPassFile string            `json:"-"`
 }
 
 // ResticResult holds the output or error of a task
@@ -65,18 +76,19 @@
 // ResticPurgeOption holds the configuration for purge
 type ResticPurgeOption struct {
-	KeepLast          int
-	KeepHourly        int
-	KeepDaily         int
-	KeepWeekly        int
-	KeepMonthly       int
-	KeepYearly        int
-	KeepWithin        string
-	KeepWithinHourly  string
-	KeepWithinDaily   string
-	KeepWithinWeekly  string
-	KeepWithinMonthly string
-	KeepWithinYearly  string
+	SnapshotID        string `json:"snapshot_id,omitempty"`
+	KeepLast          int    `json:"keep_last,omitempty"`
+	KeepHourly        int    `json:"keep_hourly,omitempty"`
+	KeepDaily         int    `json:"keep_daily,omitempty"`
+	KeepWeekly        int    `json:"keep_weekly,omitempty"`
+	KeepMonthly       int    `json:"keep_monthly,omitempty"`
+	KeepYearly        int    `json:"keep_yearly,omitempty"`
+	KeepWithin        string `json:"keep_within,omitempty"`
+	KeepWithinHourly  string `json:"keep_within_hourly,omitempty"`
+	KeepWithinDaily   string `json:"keep_within_daily,omitempty"`
+	KeepWithinWeekly  string `json:"keep_within_weekly,omitempty"`
+	KeepWithinMonthly string `json:"keep_within_monthly,omitempty"`
+	KeepWithinYearly  string `json:"keep_within_yearly,omitempty"`
 }
 
 // TaskStatus represents the task state information stored in the JSON flag file
@@ -87,54 +99,126 @@ type TaskStatus struct {
 	Completion string `json:"completion_time,omitempty"` // Only present if completed
 }
 
-// ResticRepo manages the queue and execution
-type ResticRepo struct {
+// ResticManager manages the queue and execution
+type ResticManager struct {
 	BinaryPath string
 	Env        []string
-	Backups    []Backup
+	Backups    []BackupSnapshot
 	BackupStat BackupStat
-	Logger     *logrus.Logger
-	LogFields  logrus.Fields
-	LogLevel   logrus.Level
 	TaskQueue  []*ResticTask
+	TaskErrors map[TaskType]error
+	errorMutex *sync.Mutex
 	ResultChan chan ResticResult
+	LogModule   int
+	MessageChan chan s18log.HttpMessage
 	Shutdown   bool
-	WorkerWG   sync.WaitGroup
-	Mutex      sync.Mutex
+	Mutex      *sync.Mutex
+	cond       *sync.Cond    // Condition variable for waiting and notifying tasks
+	stopCh     chan struct{} // Stop channel to signal the goroutine to stop
 	CanFetch    bool
 	CanInitRepo bool
+	isPaused    bool
 	HasLocks    bool
	taskID      int
 	CurrentID   int
 }
 
 // NewResticRepo initializes the repository manager
-func NewResticRepo(binaryPath string, logger *logrus.Logger, logfields logrus.Fields, loglevel logrus.Level) *ResticRepo {
-	repo := &ResticRepo{
+func NewResticRepo(binaryPath string, msgChan chan s18log.HttpMessage, logmodule int) *ResticManager {
+	repo := &ResticManager{
 		BinaryPath: binaryPath,
-		Backups:    make([]Backup, 0),
-		Logger:     logger,
-		LogFields:  logfields,
-		LogLevel:   loglevel,
+		Backups:     make([]BackupSnapshot, 0),
+		MessageChan: msgChan,
+		LogModule:   logmodule,
 		TaskQueue:  make([]*ResticTask, 0),
+		Mutex:      &sync.Mutex{},
+		TaskErrors: make(map[TaskType]error),
+		errorMutex: &sync.Mutex{},
 		ResultChan: make(chan ResticResult, 10),
+		stopCh:     make(chan struct{}),
 		CanFetch:    true,
 		CanInitRepo: true,
 	}
+
+	repo.cond = sync.NewCond(repo.Mutex)
 	go repo.worker() // Start the worker
-	go repo.WaitForResults()
 	return repo
 }
 
-func (repo *ResticRepo) SetEnv(env []string) {
-	repo.Env = env
+func (repo *ResticManager) IsPaused() bool {
+	repo.Mutex.Lock()
+	defer repo.Mutex.Unlock()
+	return repo.isPaused
 }
 
-// UpdateEnvKey updates the environment variable for the Restic repository
-func (repo *ResticRepo) UpdateEnvKey(key, value string) {
+func (repo *ResticManager) ResumeWorker() {
 	repo.Mutex.Lock()
 	defer repo.Mutex.Unlock()
+	repo.isPaused = false
+	repo.cond.Broadcast() // Wake up the worker goroutine
+}
+
+func (repo *ResticManager) PauseWorker() {
+	repo.Mutex.Lock()
+	defer repo.Mutex.Unlock()
+	repo.isPaused = true
+}
+
+func (repo *ResticManager) HasAnyError() bool {
+	repo.errorMutex.Lock()
+	defer repo.errorMutex.Unlock()
+	return len(repo.TaskErrors) > 0
+}
+
+func (repo *ResticManager) SetError(task TaskType, err error) {
+	repo.errorMutex.Lock()
+	defer repo.errorMutex.Unlock()
+	repo.TaskErrors[task] = err
+}
+
+func (repo *ResticManager) FetchAndClearErrors() map[TaskType]error {
+	repo.errorMutex.Lock()
+	defer repo.errorMutex.Unlock()
+
+	if len(repo.TaskErrors) == 0 {
+		return nil
+	}
+
+	errs := make(map[TaskType]error)
+	for k, v := range repo.TaskErrors {
+		errs[k] = v
+	}
+	for k := range repo.TaskErrors {
+		delete(repo.TaskErrors, k)
+	}
+
+	return errs
+}
+
+func (repo *ResticManager) FetchAndClearError(task TaskType) error {
+	repo.errorMutex.Lock()
+	defer repo.errorMutex.Unlock()
+
+	if len(repo.TaskErrors) == 0 {
+		return nil
+	}
+
+	errs, exists := repo.TaskErrors[task]
+	if !exists {
+		return nil
+	}
+
+	delete(repo.TaskErrors, task)
+	return errs
+}
+
+func (repo *ResticManager) SetEnv(env []string) {
+	repo.Env = env
+}
+
+// UpdateEnvKey updates the environment variable for the Restic repository
+func (repo *ResticManager) UpdateEnvKey(key, value string) {
 	found := false
 	for i, env := range repo.Env {
 		if strings.HasPrefix(env, key+"=") {
@@ -149,10 +233,7 @@ func (repo *ResticRepo) UpdateEnvKey(key, value string) {
 	}
 }
 
-func (repo *ResticRepo) GetRepoPath() string {
-	repo.Mutex.Lock()
-	defer repo.Mutex.Unlock()
-
+func (repo *ResticManager) GetRepoPath() string {
 	for _, env := range repo.Env {
 		if strings.HasPrefix(env, "RESTIC_REPOSITORY") {
 			return strings.Split(env, "=")[1]
@@ -162,10 +243,7 @@ func (repo *ResticRepo) GetRepoPath() string {
 	return ""
 }
 
-func (repo *ResticRepo) GetCacheDirPath() string {
-	repo.Mutex.Lock()
-	defer repo.Mutex.Unlock()
-
+func (repo *ResticManager) GetCacheDirPath() string {
 	for _, env := range repo.Env {
 		if strings.HasPrefix(env, "RESTIC_CACHE_DIR") {
 			return strings.Split(env, "=")[1]
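`PauseWorker` and `ResumeWorker` above toggle `isPaused` under the same mutex that backs the condition variable, and the `worker` loop further down blocks in `cond.Wait` instead of polling every 500 ms as the old code did. A self-contained sketch of that wait/notify pattern (the `queue` type and its methods are invented for the example):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// queue demonstrates the cond-based pattern: the consumer blocks in
// cond.Wait while the queue is empty or paused; producers Signal it.
type queue struct {
	mu     sync.Mutex
	cond   *sync.Cond
	items  []string
	paused bool
	closed bool
}

func newQueue() *queue {
	q := &queue{}
	q.cond = sync.NewCond(&q.mu)
	return q
}

func (q *queue) push(item string) {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.items = append(q.items, item)
	q.cond.Signal() // wake one waiting worker
}

func (q *queue) worker() {
	for {
		q.mu.Lock()
		for (len(q.items) == 0 || q.paused) && !q.closed {
			q.cond.Wait() // releases mu while sleeping, reacquires on wake
		}
		if q.closed {
			q.mu.Unlock()
			return
		}
		item := q.items[0]
		q.items = q.items[1:]
		q.mu.Unlock()
		fmt.Println("processing", item)
	}
}

func (q *queue) shutdown() {
	q.mu.Lock()
	q.closed = true
	q.mu.Unlock()
	q.cond.Broadcast() // wake the worker so it observes closed
}

func main() {
	q := newQueue()
	go q.worker()
	q.push("fetch")
	q.push("backup")
	time.Sleep(100 * time.Millisecond)
	q.shutdown()
}
```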
@@ -176,61 +254,49 @@
 }
 
 // GenerateTaskID ensures unique task IDs
-func (repo *ResticRepo) GenerateTaskID() int {
-	repo.Mutex.Lock()
-	defer repo.Mutex.Unlock()
+func (repo *ResticManager) GenerateTaskID() int {
 	repo.taskID++
 	return repo.taskID
 }
 
 // SetCanFetch updates CanFetch flag
-func (repo *ResticRepo) SetCanFetch(value bool) {
-	repo.Mutex.Lock()
-	defer repo.Mutex.Unlock()
+func (repo *ResticManager) SetCanFetch(value bool) {
 	repo.CanFetch = value
 }
 
 // GetCanFetch returns CanFetch value
-func (repo *ResticRepo) GetCanFetch() bool {
-	repo.Mutex.Lock()
-	defer repo.Mutex.Unlock()
+func (repo *ResticManager) GetCanFetch() bool {
 	return repo.CanFetch
 }
 
-func (repo *ResticRepo) SetLogFields(fields logrus.Fields) {
-	repo.LogFields = fields
-}
-
-// SetLogLevel updates the log level for archive module. This is different with logrus.SetLevel which sets the global log level.
-func (repo *ResticRepo) SetLogLevel(level logrus.Level) {
-	repo.LogLevel = level
-}
-
-func (repo *ResticRepo) Print(level logrus.Level, message string, args ...interface{}) {
-	if repo.LogLevel >= level {
-		repo.Logger.WithFields(repo.LogFields).Logf(level, message, args...)
+func (repo *ResticManager) Print(level logrus.Level, message string, args ...interface{}) {
+	if repo.MessageChan != nil {
+		repo.MessageChan <- s18log.HttpMessage{
+			Module:    repo.LogModule,
+			Level:     s18log.FromLogrusLevel(uint32(level)),
+			Text:      fmt.Sprintf(message, args...),
+			Timestamp: time.Now().Format("2006/01/02 15:04:05"),
+		}
 	}
 }
 
-func (repo *ResticRepo) worker() {
-	repo.WorkerWG.Add(1)
-	defer repo.WorkerWG.Done()
+func (repo *ResticManager) worker() {
 	for {
 		repo.Mutex.Lock()
-		// Check if shutdown flag is set
-		if repo.Shutdown {
-			repo.Mutex.Unlock()
-			return // Exit worker
+
+		for (len(repo.TaskQueue) == 0 || repo.isPaused) && !repo.Shutdown {
+			repo.cond.Wait()
 		}
 
-		// If TaskQueue is empty, unlock and wait for a while
-		if len(repo.TaskQueue) == 0 {
+		// Shutdown requested (woken by cond or already set)
+		if repo.Shutdown {
 			repo.Mutex.Unlock()
-			time.Sleep(500 * time.Millisecond) // Prevent busy-waiting
-			continue
+			return
 		}
 
+		// Queue has work and the worker is not paused
+		// Get the task from TaskQueue
 		task := repo.TaskQueue[0]
 		repo.TaskQueue = repo.TaskQueue[1:]
@@ -251,83 +317,70 @@
 		var result ResticResult
 		switch task.Type {
 		case FetchTask:
-			err := repo.ResticFetchRepo()
+			err := repo.FetchRepo()
 			result = ResticResult{TaskID: task.ID, Error: err}
 		case PurgeTask:
-			err := repo.ResticPurgeRepo(task.Opt)
+			err := repo.PurgeRepo(task.Opt)
 			result = ResticResult{TaskID: task.ID, Error: err}
 		case BackupTask:
-			err := repo.ResticBackup(task.DirPath, task.Tags)
+			err := repo.Backup(task.DirPath, task.Tags)
 			result = ResticResult{TaskID: task.ID, Error: err}
 		case UnlockTask:
-			err := repo.ResticUnlockRepo()
+			err := repo.UnlockRepo()
 			result = ResticResult{TaskID: task.ID, Error: err}
 		default:
-			result = ResticResult{TaskID: task.ID, Error: fmt.Errorf("unknown task type")}
+			repo.Print(logrus.WarnLevel, "Unknown task type: %d", task.Type)
+			continue
 		}
 
-		// Send result to per-task channel (if waiting)
-		if task.Result != nil {
-			task.Result <- result
-		} else {
-			repo.ResultChan <- result
+		if result.Error != nil {
+			repo.SetError(task.Type, result.Error)
 		}
 
 		repo.Print(loglevel, "Worker finished task ID: %d", task.ID)
 	}
 }
 
-func (repo *ResticRepo) AddFetchTask(waitForResult bool) (*ResticResult, error) {
-	task := ResticTask{
-		Type: FetchTask,
-	}
-
-	var resultChan chan ResticResult
-	if waitForResult {
-		resultChan = make(chan ResticResult, 1)
-		task.Result = resultChan
+func (repo *ResticManager) appendTask(task *ResticTask) {
+	if task == nil {
+		return
 	}
 
 	// Add task to slice
 	repo.Mutex.Lock()
-	repo.TaskQueue = append(repo.TaskQueue, &task)
-	repo.Mutex.Unlock()
+	defer repo.Mutex.Unlock()
 
-	if waitForResult {
-		result := <-resultChan
-		return &result, result.Error
+	repo.TaskQueue = append(repo.TaskQueue, task)
+
+	// Log the addition of the tasks with ID
+	if task.ID != 0 {
+		repo.Print(logrus.InfoLevel, "Added %s task to the queue, ID: %d", GetTaskName(task.Type), task.ID)
 	}
-	return nil, nil
+
+	// Notify the worker that a new task is available if not paused
+	repo.cond.Signal()
 }
 
-func (repo *ResticRepo) AddPurgeTask(opt ResticPurgeOption, waitForResult bool) (*ResticResult, error) {
+func (repo *ResticManager) AddFetchTask() {
+	// Add task to slice
+	repo.appendTask(&ResticTask{
+		Type: FetchTask,
+	})
+}
+
+func (repo *ResticManager) AddPurgeTask(opt ResticPurgeOption) {
 	task := ResticTask{
 		ID:   repo.GenerateTaskID(),
 		Type: PurgeTask,
 		Opt:  opt,
 	}
 
-	var resultChan chan ResticResult
-	if waitForResult {
-		resultChan = make(chan ResticResult, 1) // If waiting, create result channel
-		task.Result = resultChan
-	}
-
 	// Add task to slice
-	repo.Mutex.Lock()
-	repo.TaskQueue = append(repo.TaskQueue, &task)
-	repo.Mutex.Unlock()
-
-	repo.Print(logrus.InfoLevel, "Task %d submitted (Wait: %v)", task.ID, waitForResult)
-
-	if waitForResult {
-		result := <-resultChan // Wait for result
-		return &result, result.Error
-	}
-	return nil, nil
+	repo.appendTask(&task)
+	repo.AddFetchTask()
 }
 
-func (repo *ResticRepo) AddBackupTask(dirpath string, tags []string, waitForResult bool) (*ResticResult, error) {
+func (repo *ResticManager) AddBackupTask(dirpath string, tags []string) {
 	task := ResticTask{
 		ID:      repo.GenerateTaskID(),
 		Type:    BackupTask,
@@ -335,107 +388,211 @@
 		Tags:    tags,
 	}
 
-	var resultChan chan ResticResult
-	if waitForResult {
-		resultChan = make(chan ResticResult, 1) // If waiting, create result channel
-		task.Result = resultChan
-	}
-
 	// Add task to slice
-	repo.Mutex.Lock()
-	repo.TaskQueue = append(repo.TaskQueue, &task)
-	repo.Mutex.Unlock()
-	repo.Print(logrus.InfoLevel, "Task %d submitted (Wait: %v)", task.ID, waitForResult)
-
-	if waitForResult {
-		result := <-resultChan // Wait for result
-		return &result, result.Error
-	}
-	return nil, nil
+	repo.appendTask(&task)
+	repo.AddFetchTask()
 }
 
-func (repo *ResticRepo) AddUnlockTask(waitForResult bool) (*ResticResult, error) {
+func (repo *ResticManager) AddUnlockTask() {
 	task := ResticTask{
+		ID:   repo.GenerateTaskID(),
 		Type: UnlockTask,
 	}
+	repo.appendTask(&task)
+	repo.AddFetchTask()
+}
 
-	var resultChan chan ResticResult
-	if waitForResult {
-		resultChan = make(chan ResticResult, 1) // If waiting, create result channel
-		task.Result = resultChan
+func (repo *ResticManager) MoveTask(mvType string, taskID, afterTaskID int) error {
+	moveType := MoveType(mvType)
+	switch moveType {
+	case MoveFirst, MoveAfter, MoveLast:
+		return repo.moveTask(moveType, taskID, afterTaskID)
+	default:
+		return errors.New("invalid move type")
 	}
+}
 
-	// Add task to slice
+func (repo *ResticManager) moveTask(moveType MoveType, taskID, afterTaskID int) error {
 	repo.Mutex.Lock()
-	repo.TaskQueue = append(repo.TaskQueue, &task)
-	repo.Mutex.Unlock()
 
-	if waitForResult {
-		result := <-resultChan // Wait for result
-		return &result, result.Error
+	waspaused := repo.isPaused
+	if !waspaused {
+		repo.isPaused = true
+	}
+
+	defer func() {
+		if !waspaused {
+			repo.isPaused = false
+			repo.cond.Broadcast()
+		}
+		repo.Mutex.Unlock()
+	}()
+
+	var taskToMove *ResticTask
+	var taskIndex int
+	for i, task := range repo.TaskQueue {
+		if task.ID == taskID {
+			taskToMove = task
+			taskIndex = i
+			break
+		}
+	}
+
+	if taskToMove == nil {
+		return errors.New("task not found")
 	}
-	return nil, nil
+
+	switch moveType {
+	case MoveFirst:
+		if taskIndex == 0 {
+			return nil // Already first
+		}
+		repo.TaskQueue = append(repo.TaskQueue[:taskIndex], repo.TaskQueue[taskIndex+1:]...)
+		repo.TaskQueue = append([]*ResticTask{taskToMove}, repo.TaskQueue...)
+	case MoveAfter:
+		if afterTaskID == 0 {
+			return errors.New("afterTaskID is required for MoveAfter")
+		}
+
+		var afterIndex int = -1
+		for i, task := range repo.TaskQueue {
+			if task.ID == afterTaskID {
+				afterIndex = i
+				break
+			}
+		}
+
+		if afterIndex == -1 {
+			return errors.New("afterTaskID not found")
+		}
+
+		if taskIndex == afterIndex+1 {
+			return nil // Already after the specified task
+		}
+
+		if taskIndex < afterIndex {
+			afterIndex-- // Adjust index since we will remove the task first
+		}
+
+		repo.TaskQueue = append(repo.TaskQueue[:taskIndex], repo.TaskQueue[taskIndex+1:]...)
+		repo.TaskQueue = append(repo.TaskQueue[:afterIndex+1], append([]*ResticTask{taskToMove}, repo.TaskQueue[afterIndex+1:]...)...)
+	case MoveLast:
+		if taskIndex == len(repo.TaskQueue)-1 {
+			return nil // Already last
+		}
+		repo.TaskQueue = append(repo.TaskQueue[:taskIndex], repo.TaskQueue[taskIndex+1:]...)
+		repo.TaskQueue = append(repo.TaskQueue, taskToMove)
+	}
+
+	return nil
 }
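For `MoveAfter`, `moveTask` must adjust the destination index when the moved task sits before it, because removing the task shifts everything after it one slot to the left. The same arithmetic in isolation, on plain positions rather than task IDs (`moveAfter` is a hypothetical helper, not part of the patch):

```go
package main

import "fmt"

// moveAfter reorders ids so the element at src ends up directly after the
// element at dst, mirroring moveTask's index adjustment: once the moved
// element is removed, everything to its right shifts left by one.
func moveAfter(ids []int, src, dst int) []int {
	v := ids[src]
	if src < dst {
		dst-- // account for the removal shifting dst left
	}
	ids = append(ids[:src], ids[src+1:]...)
	out := make([]int, 0, len(ids)+1)
	out = append(out, ids[:dst+1]...)
	out = append(out, v)
	out = append(out, ids[dst+1:]...)
	return out
}

func main() {
	fmt.Println(moveAfter([]int{1, 2, 3, 4, 5}, 0, 3)) // [2 3 4 1 5]
	fmt.Println(moveAfter([]int{1, 2, 3, 4, 5}, 4, 1)) // [1 2 5 3 4]
}
```

Building the result in a fresh slice, as here, sidesteps the aliasing subtleties of nested `append` calls on one backing array; the patch's in-place variant is still correct because its inner `append` starts from a one-element literal and therefore allocates its own storage.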
-func (repo *ResticRepo) HasFetchQueue() bool {
+func (repo *ResticManager) HasFetchQueue() bool {
 	repo.Mutex.Lock()
 	defer repo.Mutex.Unlock()
-	if len(repo.TaskQueue) > 0 {
-		for _, task := range repo.TaskQueue {
-			if task.Type == FetchTask {
-				return true
-			}
+
+	for _, task := range repo.TaskQueue {
+		if task.Type == FetchTask {
+			return true
 		}
 	}
 	return false
 }
 
-func (repo *ResticRepo) EmptyQueue() {
-	// Clear the task queue
-	repo.Print(logrus.InfoLevel, "Emptying task queue...")
-	for _, task := range repo.TaskQueue {
-		if task.Result != nil {
-			task.Result <- ResticResult{TaskID: task.ID, Error: fmt.Errorf("task cancelled due to queue emptying")}
-		}
-	}
+func (repo *ResticManager) CancelTask(taskId int) {
+	repo.Mutex.Lock()
+	defer repo.Mutex.Unlock()
+
+	repo.Print(logrus.InfoLevel, "Cancelling restic task ID: %d", taskId)
+	// Find the queue index of the task; the splice below must use the
+	// position in the queue, not the task ID, since IDs are not indexes.
+	taskIndex := -1
+	for i, task := range repo.TaskQueue {
+		if task.ID == taskId {
+			taskIndex = i
+			break
+		}
+	}
+	if taskIndex != -1 {
+		repo.TaskQueue = append(repo.TaskQueue[:taskIndex], repo.TaskQueue[taskIndex+1:]...)
+		repo.Print(logrus.InfoLevel, "Cancelled restic task ID: %d", taskId)
+	} else {
+		repo.Print(logrus.WarnLevel, "Restic task ID not found: %d", taskId)
+	}
+}
 
+func (repo *ResticManager) ClearQueue() {
 	repo.Mutex.Lock()
-	repo.TaskQueue = make([]*ResticTask, 0)
-	repo.Mutex.Unlock()
+	defer repo.Mutex.Unlock()
+
+	repo.Print(logrus.InfoLevel, "Emptying task queue...")
+
+	tasklist := []string{}
+	for _, task := range repo.TaskQueue {
+		tasklist = append(tasklist, fmt.Sprintf("ID: %d, Type: %s", task.ID, GetTaskName(task.Type)))
+	}
+
+	if len(tasklist) > 0 {
+		repo.Print(logrus.InfoLevel, "Clearing tasks: %s", strings.Join(tasklist, "; "))
+	}
+
+	repo.TaskQueue = repo.TaskQueue[:0]
 
 	repo.Print(logrus.InfoLevel, "Task queue emptied.")
 }
 
-func (repo *ResticRepo) ShutdownWorker() {
+func (repo *ResticManager) ShutdownWorker() {
 	repo.Mutex.Lock()
-	repo.Shutdown = true // Signal workers to stop
+	repo.Shutdown = true
 	repo.Mutex.Unlock()
 
-	repo.Print(logrus.InfoLevel, "Shutting down workers...")
-
-	// Wait for all workers to finish
-	repo.WorkerWG.Wait()
-	repo.Print(logrus.InfoLevel, "All workers stopped.")
+	repo.cond.Broadcast()
 }
 
-// WaitForResults waits for the task results
-func (repo *ResticRepo) WaitForResults() {
-	// Wait for task completion and handle results
-	for result := range repo.ResultChan {
-		if result.Error != nil {
-			repo.Print(logrus.ErrorLevel, "TaskID: %d Task: %s Error: %v", result.TaskID, GetTaskName(result.TaskType), result.Error)
-		} else {
-			repo.Print(logrus.InfoLevel, "TaskID: %d Task: %s Result: Completed", result.TaskID, GetTaskName(result.TaskType))
-		}
-	}
+func (repo *ResticManager) CheckRepoFiles() error {
+	repopath := repo.GetRepoPath()
+
+	if _, err := os.Stat(filepath.Join(repopath, "config")); os.IsNotExist(err) {
+		// Check the repo data
+		errstr := "repo config is missing"
+		_, err := os.Stat(filepath.Join(repopath, "data"))
+		if err == nil {
+			errstr += " but data exists"
+			repo.CanInitRepo = false
+			err = errors.New(errstr)
+			repo.SetError(InitTask, err)
+			return err
+		} else if err != nil && !os.IsNotExist(err) { // Not a not-exist error (i.e., other error)
+			errstr += " and failed to check repo data: " + err.Error()
+			repo.CanInitRepo = false
+			err = errors.New(errstr)
+			repo.SetError(InitTask, err)
+			return err
+		} else { // Repo data does not exist (can init)
+			// Initialize the repo
+			err = repo.InitRepo(false)
+			if err != nil {
+				return err
+			}
+		}
+	} else if err != nil {
+		repo.CanInitRepo = false
+		err = fmt.Errorf("failed to check repo config: %w", err)
+		repo.SetError(InitTask, err)
+		return err
+	}
+
+	repo.CanInitRepo = true
+	repo.FetchAndClearError(InitTask) // Clear any previous init error under the error mutex
+
+	return nil
 }
 
 // RunCommand executes a command within the context of a Restic repository, capturing stdout and stderr.
 // It uses the ResticRepo's BinaryPath as the first parameter, along with any additional args.
 // Optionally, you can skip capturing the output to save memory.
-func (repo *ResticRepo) RunCommand(args []string, loglevel logrus.Level, captureOutput bool) ([]byte, []byte, error) {
+func (repo *ResticManager) RunCommand(args []string, loglevel logrus.Level, captureOutput bool) ([]byte, []byte, error) {
 	// Set up the command
 	cmd := exec.Command(repo.BinaryPath, args...)
 	cmd.Env = append(os.Environ(), repo.Env...)
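With per-task result channels gone, a failed task now parks its error in the mutex-guarded `TaskErrors` map via `SetError`, and callers drain it with `FetchAndClearError`/`FetchAndClearErrors` — which is why `CheckRepoFiles` above clears `InitTask` through the accessor rather than deleting from the map directly. The pattern reduced to its core (all names here are invented for the sketch):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

type taskType int

const (
	initTask taskType = iota
	fetchTask
)

// errorStore mirrors the TaskErrors map on ResticManager: workers record
// the last error per task type, and a monitoring loop drains them at once.
type errorStore struct {
	mu   sync.Mutex
	errs map[taskType]error
}

func (s *errorStore) set(t taskType, err error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.errs[t] = err
}

// fetchAndClear hands back the accumulated errors and resets the map, so a
// reporting pass never sees the same failure twice.
func (s *errorStore) fetchAndClear() map[taskType]error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if len(s.errs) == 0 {
		return nil
	}
	out := s.errs
	s.errs = make(map[taskType]error)
	return out
}

func main() {
	s := &errorStore{errs: make(map[taskType]error)}
	s.set(fetchTask, errors.New("repository has locks"))
	for t, err := range s.fetchAndClear() {
		fmt.Printf("task %d failed: %v\n", t, err)
	}
	fmt.Println("drained:", s.fetchAndClear() == nil)
}
```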
@@ -502,7 +659,7 @@ func (repo *ResticRepo) RunCommand(args []string, loglevel logrus.Level, capture
 	return nil, nil, nil
 }
 
-func (repo *ResticRepo) ResticInitRepo(force bool) error {
+func (repo *ResticManager) InitRepo(force bool) error {
 	repopath := repo.GetRepoPath()
 	if force {
 		err := os.RemoveAll(repopath)
@@ -513,6 +670,7 @@ func (repo *ResticRepo) ResticInitRepo(force bool) error {
 		os.MkdirAll(repopath, 0755)
 	}
 
+	defer repo.AddFetchTask()
 	// Prepare the arguments for the "forget" command
 	args := []string{"init"}
 
@@ -521,16 +679,18 @@
 	if err != nil {
 		// Update the repo flag to prevent further fetch attempts
 		repo.CanInitRepo = false
+		err = errors.New(string(stderr))
 
-		// Handle error (including stderr)
-		return fmt.Errorf("failed to init repo: %v, stderr: %s", err, stderr)
+		repo.SetError(InitTask, err)
+
+		return err
 	}
 
 	return nil
 }
 
-// ResticFetchRepoStat performs the statistic fetch
-func (repo *ResticRepo) ResticFetchRepoStat() error {
+// fetchRepoStat performs the statistic fetch
+func (repo *ResticManager) fetchRepoStat() error {
 	// Prepare the arguments for the "forget" command
 	args := []string{"stats", "--mode", "raw-data", "--json"}
 
@@ -554,62 +714,20 @@
 	return nil
 }
 
-// ResticFetchRepo performs the fetch
-func (repo *ResticRepo) ResticFetchRepo() error {
-	// Check if the repo is able to fetch and initialized
-	if !repo.GetCanFetch() {
-		return nil
-	}
-
-	// Check if the repo is initialized
-	repopath := repo.GetRepoPath()
-	if _, err := os.Stat(filepath.Join(repopath, "config")); os.IsNotExist(err) {
-		// Check the repo data
-		_, err := os.Stat(filepath.Join(repopath, "data"))
-		if os.IsNotExist(err) {
-			err = repo.ResticInitRepo(false)
-			if err != nil {
-				repo.CanInitRepo = false
-				return fmt.Errorf("failed to init repo: %w", err)
-			}
-		} else if err != nil {
-			repo.CanInitRepo = false
-			return fmt.Errorf("failed to check repo path: %w", err)
-		} else {
-			repo.CanInitRepo = false
-			// Repo data exists, but config does not
-			return fmt.Errorf("repo config is missing but data exists")
-		}
-	} else if err != nil {
-		repo.CanInitRepo = false
-		return fmt.Errorf("failed to check repo path: %w", err)
-	}
-
-	repo.CanInitRepo = true
-
-	// Check latest lock in repository
-	err := repo.CheckLocks()
-	if err != nil {
-		return fmt.Errorf("failed to check locks: %w", err)
-	}
-
-	// Prevent fetching if there are locks
-	if repo.HasLocks {
-		return nil
-	}
-
+// fetchRepoSnapshots performs the snapshot fetch
+func (repo *ResticManager) fetchRepoSnapshots() error {
 	// Proceed with fetch
 	args := []string{"snapshots", "--json"}
 
 	stdout, stderr, err := repo.RunCommand(args, logrus.DebugLevel, true)
 	if err != nil {
 		if strings.Contains(string(stderr), "no such file or directory") {
-			_ = repo.ResticInitRepo(false)
+			_ = repo.InitRepo(false)
 		}
 		// Handle error (including stderr)
 		return fmt.Errorf("failed to fetch repo: %v, stderr: %s", err, stderr)
 	}
 
-	var backups []Backup
+	var backups []BackupSnapshot
 	err = json.Unmarshal(stdout, &backups)
 	if err != nil {
 		return fmt.Errorf("failed to unmarshal backups: %w", err)
@@ -618,7 +736,37 @@
 	// Update the Backups field with the fetched backups
 	repo.Backups = backups
 
-	err = repo.ResticFetchRepoStat()
+	return nil // Success
+}
+
+// FetchRepo performs the fetch for snapshots and stats
+func (repo *ResticManager) FetchRepo() error {
+	// Check if the repo is able to fetch and initialized
+	if !repo.GetCanFetch() {
+		return nil
+	}
+
+	repo.SetCanFetch(false)
+	defer repo.SetCanFetch(true)
+
+	// Check if the repo is initialized
+	if err := repo.CheckRepoFiles(); err != nil {
+		return err
+	}
+
+	// Check latest lock in repository
+	err := repo.CheckResticLocks()
+	if err != nil {
+		return err
+	}
+
+	// Fetch snapshots
+	err = repo.fetchRepoSnapshots()
+	if err != nil {
+		return fmt.Errorf("failed to fetch repo snapshots: %w", err)
+	}
+
+	err = repo.fetchRepoStat()
 	if err != nil {
 		return fmt.Errorf("failed to fetch repo stat: %w", err)
 	}
@@ -702,16 +850,20 @@ func GetKeepN(keepLast int, keepHourly int, keepDaily int, keepWeekly int, keepM
 	return keep, useKeep
 }
 
-// ResticPurgeRepo performs the actual purging of the repository
-func (repo *ResticRepo) ResticPurgeRepo(opt ResticPurgeOption) error {
-	if !repo.GetCanFetch() {
-		time.Sleep(time.Second)
-		return repo.ResticPurgeRepo(opt)
+func (repo *ResticManager) purgeSingleSnapshot(snapshotID string) error {
+	args := []string{"forget", "--prune", snapshotID}
+
+	// Execute the Restic "forget" command using RunCommand
+	_, stderr, err := repo.RunCommand(args, logrus.InfoLevel, false)
+	if err != nil {
+		// Handle error (including stderr)
+		return fmt.Errorf("failed to purge repo: %v, stderr: %s", err, stderr)
 	}
 
-	repo.SetCanFetch(false)
-	defer repo.SetCanFetch(true)
-	// Prepare the arguments for the "forget" command
+	return nil
+}
+
+func (repo *ResticManager) purgeWithPolicy(opt ResticPurgeOption) error {
 	args := []string{"forget", "--prune"}
 
 	// Get the arguments for the "keep" options
@@ -735,11 +887,49 @@
 	return nil
 }
 
-// ResticBackup performs the backup (mock implementation for now)
-func (repo *ResticRepo) ResticBackup(dirpath string, tags []string) error {
+// PurgeRepo performs the actual purging of the repository
+func (repo *ResticManager) PurgeRepo(opt ResticPurgeOption) error {
+	// Check if the repo is able to fetch and initialized
+	if !repo.GetCanFetch() {
+		time.Sleep(time.Second)
+		return repo.PurgeRepo(opt)
+	}
+
+	repo.SetCanFetch(false)
+	defer repo.SetCanFetch(true)
+
+	// Check if the repo is initialized
+	if err := repo.CheckRepoFiles(); err != nil {
+		return err
+	}
+
+	// Check latest lock in repository
+	err := repo.CheckResticLocks()
+	if err != nil {
+		return err
+	}
+
+	// Purge a single snapshot when an ID is given, otherwise apply the retention policy
+
+	if opt.SnapshotID != "" {
+		err := repo.purgeSingleSnapshot(opt.SnapshotID)
+		if err != nil {
+			return err
+		}
+	} else {
+		err := repo.purgeWithPolicy(opt)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
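`PurgeRepo` now distinguishes a single-snapshot purge (`restic forget --prune <snapshot-id>`) from a policy purge whose flags come from the `Keep*` fields; the real flag assembly lives in `GetKeepN`, which this diff only shows in part. A reduced sketch of the idea (the `retention` struct and `buildForgetArgs` helper are hypothetical; the flags are restic's own):

```go
package main

import (
	"fmt"
	"strconv"
)

// retention mirrors a subset of ResticPurgeOption's counter fields.
type retention struct {
	KeepLast   int
	KeepDaily  int
	KeepWithin string
}

// buildForgetArgs shows the kind of "restic forget" invocation a policy
// purge boils down to (--keep-last, --keep-daily, --keep-within).
func buildForgetArgs(r retention) []string {
	args := []string{"forget", "--prune"}
	if r.KeepLast > 0 {
		args = append(args, "--keep-last", strconv.Itoa(r.KeepLast))
	}
	if r.KeepDaily > 0 {
		args = append(args, "--keep-daily", strconv.Itoa(r.KeepDaily))
	}
	if r.KeepWithin != "" {
		args = append(args, "--keep-within", r.KeepWithin)
	}
	return args
}

func main() {
	fmt.Println(buildForgetArgs(retention{KeepLast: 3, KeepDaily: 7, KeepWithin: "30d"}))
	// [forget --prune --keep-last 3 --keep-daily 7 --keep-within 30d]
}
```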
+
+func (repo *ResticManager) Backup(dirpath string, tags []string) error {
 	if !repo.GetCanFetch() {
 		time.Sleep(time.Second)
-		return repo.ResticBackup(dirpath, tags)
+		return repo.Backup(dirpath, tags)
 	}
 
 	repo.SetCanFetch(false)
@@ -767,16 +957,7 @@
 	return nil
 }
 
-// ResticBackup performs the backup (mock implementation for now)
-func (repo *ResticRepo) CheckLocks() error {
-	if !repo.GetCanFetch() {
-		time.Sleep(time.Second)
-		return repo.CheckLocks()
-	}
-
-	repo.SetCanFetch(false)
-	defer repo.SetCanFetch(true)
-
+func (repo *ResticManager) CheckResticLocks() error {
 	// Prepare the arguments for the "backup" command
 	args := []string{"list", "locks", "--no-lock", "-q"}
 
@@ -784,7 +965,7 @@
 	stdout, stderr, err := repo.RunCommand(args, logrus.DebugLevel, true)
 	if err != nil {
 		if strings.Contains(string(stderr), "no such file or directory") {
-			_ = repo.ResticInitRepo(false)
+			_ = repo.InitRepo(false)
 		}
 		// Handle error (including stderr)
 		return fmt.Errorf("failed to check repo locks: %v, stderr: %s", err, stderr)
@@ -792,23 +973,28 @@
 
 	haslock := len(stdout) > 0
 
+	if haslock {
+		err = fmt.Errorf("repository has locks:\n%s", string(stdout))
+	}
+
 	if repo.HasLocks != haslock {
+		repo.HasLocks = haslock
 		if haslock {
-			repo.Print(logrus.InfoLevel, "Repository has locks")
+			return err
 		} else {
-			repo.Print(logrus.InfoLevel, "Repository locks has unlocked")
+			repo.Print(logrus.InfoLevel, "Repository locks have been cleared")
 		}
-		repo.HasLocks = haslock
 	}
 
 	return nil
 }
 
-// ResticBackup performs the backup (mock implementation for now)
-func (repo *ResticRepo) ResticUnlockRepo() error {
+// UnlockRepo unlocks the repository
+func (repo *ResticManager) UnlockRepo() error {
+
 	if !repo.GetCanFetch() {
 		time.Sleep(time.Second)
-		return repo.CheckLocks()
+		return repo.UnlockRepo()
 	}
 
 	repo.SetCanFetch(false)
@@ -821,7 +1007,7 @@
 	stdout, stderr, err := repo.RunCommand(args, logrus.InfoLevel, true)
 	if err != nil {
 		if strings.Contains(string(stderr), "no such file or directory") {
-			_ = repo.ResticInitRepo(false)
+			_ = repo.InitRepo(false)
 		}
 		// Handle error (including stderr)
 		return fmt.Errorf("failed to check repo locks: %v, stderr: %s", err, stderr)
@@ -836,10 +1022,10 @@
 }
 
 // ResticChangePassword changes the repository password
-func (repo *ResticRepo) ResticAddPassword(newpassfile string) error {
+func (repo *ResticManager) AddRepoKey(newpassfile string) error {
 	if !repo.GetCanFetch() {
 		time.Sleep(time.Second)
-		return repo.ResticAddPassword(newpassfile)
+		return repo.AddRepoKey(newpassfile)
 	}
 
 	repo.SetCanFetch(false)
@@ -869,10 +1055,10 @@ type ResticKey struct {
 	Created string `json:"created"`
 }
 
-func (repo *ResticRepo) ResticKeyList() ([]ResticKey, error) {
+func (repo *ResticManager) GetRepoKeyList() ([]ResticKey, error) {
 	if !repo.GetCanFetch() {
 		time.Sleep(time.Second)
-		return repo.ResticKeyList()
+		return repo.GetRepoKeyList()
 	}
 
 	repo.SetCanFetch(false)
@@ -885,7 +1071,7 @@
 	stdout, stderr, err := repo.RunCommand(args, logrus.DebugLevel, true)
 	if err != nil {
 		if strings.Contains(string(stderr), "no such file or directory") {
-			_ = repo.ResticInitRepo(false)
+			_ = repo.InitRepo(false)
 		}
 		// Handle error (including stderr)
 		return nil, fmt.Errorf("failed to list repo keys: %v, stderr: %s", err, stderr)
@@ -899,10 +1085,10 @@
 	return keys, nil
 }
 
-func (repo *ResticRepo) ResticRemoveKey(keyid string) error {
+func (repo *ResticManager) RemoveRepoKey(keyid string) error {
 	if !repo.GetCanFetch() {
 		time.Sleep(time.Second)
-		return repo.ResticRemoveKey(keyid)
+		return repo.RemoveRepoKey(keyid)
 	}
 
 	repo.SetCanFetch(false)
@@ -924,10 +1110,10 @@
 	return nil
 }
 
-func (repo *ResticRepo) ResticTestPassword(newpass string) error {
+func (repo *ResticManager) TestPassword(newpass string) error {
 	if !repo.GetCanFetch() {
 		time.Sleep(time.Second)
-		return repo.ResticTestPassword(newpass)
+		return repo.TestPassword(newpass)
 	}
 
 	repo.SetCanFetch(false)
diff --git a/utils/dbhelper/dbhelper.go b/utils/dbhelper/dbhelper.go
index 09811d6c5..e61fb9ad7 100644
--- a/utils/dbhelper/dbhelper.go
+++ b/utils/dbhelper/dbhelper.go
@@ -29,9 +29,9 @@ import (
 	"github.com/jmoiron/sqlx"
 	"github.com/percona/go-mysql/query"
-	v3 "github.com/signal18/replication-manager/repmanv3"
 	"github.com/signal18/replication-manager/utils/misc"
 	"github.com/signal18/replication-manager/utils/version"
+	"google.golang.org/protobuf/runtime/protoimpl"
 )
 
 const debug = false
@@ -102,7 +102,86 @@ func (a PFSQuerySorter) Less(i, j int) bool {
 	return l > r
 }
 
-type TableSizeSorter []*v3.Table
+type Table struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	TableSchema   string `protobuf:"bytes,1,opt,name=table_schema,json=tableSchema,proto3" json:"table_schema,omitempty"`
+	TableName     string `protobuf:"bytes,2,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"`
+	Engine        string `protobuf:"bytes,3,opt,name=engine,proto3" json:"engine,omitempty"`
+	TableRows     int64  `protobuf:"varint,4,opt,name=table_rows,json=tableRows,proto3" json:"table_rows,omitempty"`
+	DataLength    int64  `protobuf:"varint,5,opt,name=data_length,json=dataLength,proto3" json:"data_length,omitempty"`
+	IndexLength   int64  `protobuf:"varint,6,opt,name=index_length,json=indexLength,proto3" json:"index_length,omitempty"`
+	TableCrc      uint64 `protobuf:"varint,7,opt,name=table_crc,json=tableCrc,proto3" json:"table_crc,omitempty"`
+	TableClusters string `protobuf:"bytes,8,opt,name=table_clusters,json=tableClusters,proto3" json:"table_clusters,omitempty"`
+	TableSync     string `protobuf:"bytes,9,opt,name=table_sync,json=tableSync,proto3" json:"table_sync,omitempty"`
+}
+
+func (x *Table) GetTableSchema() string {
+	if x != nil {
+		return x.TableSchema
+	}
+	return ""
+}
+
+func (x *Table) GetTableName() string {
+	if x != nil {
+		return x.TableName
+	}
+	return ""
+}
+
+func (x *Table) GetEngine() string {
+	if x != nil {
+		return x.Engine
+	}
+	return ""
+}
+
+func (x *Table) GetTableRows() int64 {
+	if x != nil {
+		return x.TableRows
+	}
+	return 0
+}
+
+func (x *Table) GetDataLength() int64 {
+	if x != nil {
+		return x.DataLength
+	}
+	return 0
+}
+
+func (x *Table) GetIndexLength() int64 {
+	if x != nil {
+		return x.IndexLength
+	}
+	return 0
+}
+
+func (x *Table) GetTableCrc() uint64 {
+	if x != nil {
+		return x.TableCrc
+	}
+	return 0
+}
+
+func (x *Table) GetTableClusters() string {
+	if x != nil {
+		return x.TableClusters
+	}
+	return ""
+}
+
+func (x *Table) GetTableSync() string {
+	if x != nil {
+		return x.TableSync
+	}
+	return ""
+}
+
+type TableSizeSorter []*Table
 
 func (a TableSizeSorter) Len() int      { return len(a) }
 func (a TableSizeSorter) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
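With `Table` now defined locally, `TableSizeSorter` keeps its existing `sort.Interface` implementation and only its element type changes, from `[]*v3.Table` to `[]*dbhelper.Table`. A usage sketch (schema names and sizes are made up):

```go
package main

import (
	"fmt"
	"sort"

	"github.com/signal18/replication-manager/utils/dbhelper"
)

func main() {
	tables := []*dbhelper.Table{
		{TableSchema: "app", TableName: "events", DataLength: 512, IndexLength: 128},
		{TableSchema: "app", TableName: "users", DataLength: 64, IndexLength: 32},
	}

	// TableSizeSorter implements sort.Interface over []*Table, so the
	// usual sort.Sort call applies unchanged after the refactor.
	sort.Sort(dbhelper.TableSizeSorter(tables))

	for _, t := range tables {
		fmt.Println(t.GetTableSchema(), t.GetTableName(), t.GetDataLength()+t.GetIndexLength())
	}
}
```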
@@ -118,7 +197,7 @@ type Disk struct {
 	Available int32
 }
 
-/* replaced by v3.Table
+/* replaced by Table
 type Table struct {
 	Table_schema string `json:"tableSchema"`
 	Table_name   string `json:"tableName"`
@@ -2097,9 +2176,9 @@ func GetNoBlockOnMedataLock(db *sqlx.DB, myver *version.Version) string {
 	}
 	return noBlockOnMedataLock
 }
-func GetTables(db *sqlx.DB, myver *version.Version) (map[string]*v3.Table, []v3.Table, string, error) {
-	vars := make(map[string]*v3.Table)
-	var tblList []v3.Table
+func GetTables(db *sqlx.DB, myver *version.Version) (map[string]*Table, []Table, string, error) {
+	vars := make(map[string]*Table)
+	var tblList []Table
 	logs := ""
 
 	query := GetNoBlockOnMedataLock(db, myver) + "SELECT SCHEMA_NAME from information_schema.SCHEMATA WHERE SCHEMA_NAME NOT IN('information_schema','mysql','performance_schema', 'sys') AND SCHEMA_NAME NOT LIKE '#%'"
@@ -2134,7 +2213,7 @@ func GetTables(db *sqlx.DB, myver *version.Version) (map[string]*v3.Table, []v3.
 	defer rows.Close()
 	crc64Table := crc64.MakeTable(0xC96C5795D7870F42)
 	for rows.Next() {
-		var v v3.Table
+		var v Table
 		err = rows.Scan(&v.TableSchema, &v.TableName, &v.Engine, &v.TableRows, &v.DataLength, &v.IndexLength, &v.TableCrc)
 		if err != nil {
diff --git a/utils/dbhelper/map.go b/utils/dbhelper/map.go
new file mode 100644
index 000000000..1489502ea
--- /dev/null
+++ b/utils/dbhelper/map.go
@@ -0,0 +1,384 @@
+package dbhelper
+
+import "sync"
+
+type PFSQueriesMap struct {
+	*sync.Map
+}
+
+func NewPFSQueriesMap() *PFSQueriesMap {
+	s := new(sync.Map)
+	m := &PFSQueriesMap{Map: s}
+	return m
+}
+
+func (m *PFSQueriesMap) Get(key string) *PFSQuery {
+	if v, ok := m.Load(key); ok {
+		return v.(*PFSQuery)
+	}
+	return nil
+}
+
+func (m *PFSQueriesMap) CheckAndGet(key string) (*PFSQuery, bool) {
+	v, ok := m.Load(key)
+	if ok {
+		return v.(*PFSQuery), true
+	}
+	return nil, false
+}
+
+func (m *PFSQueriesMap) Set(key string, value *PFSQuery) {
+	m.Store(key, value)
+}
+
+func (m *PFSQueriesMap) ToNormalMap(c map[string]*PFSQuery) {
+	// Clear the old values in the output map
+	for k := range c {
+		delete(c, k)
+	}
+
+	// Insert all values from the PFSQueriesMap to the output map
+	m.Callback(func(key string, value *PFSQuery) bool {
+		c[key] = value
+		return true
+	})
+}
+
+func (m *PFSQueriesMap) ToNewMap() map[string]*PFSQuery {
+	result := make(map[string]*PFSQuery)
+	m.Range(func(k, v any) bool {
+		result[k.(string)] = v.(*PFSQuery)
+		return true
+	})
+	return result
+}
+
+func (m *PFSQueriesMap) Callback(f func(key string, value *PFSQuery) bool) {
+	m.Range(func(k, v any) bool {
+		return f(k.(string), v.(*PFSQuery))
+	})
+}
+
+func (m *PFSQueriesMap) Clear() {
+	m.Range(func(key, value any) bool {
+		m.Delete(key.(string))
+		return true
+	})
+}
+
+func FromNormalPFSMap(m *PFSQueriesMap, c map[string]PFSQuery) *PFSQueriesMap {
+	if m == nil {
+		m = NewPFSQueriesMap()
+	} else {
+		m.Clear()
+	}
+
+	for k, v := range c {
+		v := v // copy the iteration variable so each entry stores a distinct pointer
+		m.Set(k, &v)
+	}
+
+	return m
+}
+
+func FromPFSQueriesMap(m *PFSQueriesMap, c *PFSQueriesMap) *PFSQueriesMap {
+	if m == nil {
+		m = NewPFSQueriesMap()
+	} else {
+		m.Clear()
+	}
+
+	if c != nil {
+		c.Callback(func(key string, value *PFSQuery) bool {
+			m.Set(key, value)
+			return true
+		})
+	}
+
+	return m
+}
+
+type PluginsMap struct {
+	*sync.Map
+}
+
+func NewPluginsMap() *PluginsMap {
+	s := new(sync.Map)
+	m := &PluginsMap{Map: s}
+	return m
+}
+
+func (m *PluginsMap) Get(key string) *Plugin {
+	if v, ok := m.Load(key); ok {
+		return v.(*Plugin)
+	}
+	return nil
+}
+
+func (m *PluginsMap) CheckAndGet(key string) (*Plugin, bool) {
+	v, ok := m.Load(key)
+	if ok {
+		return v.(*Plugin), true
+	}
+	return nil, false
+}
+
+func (m *PluginsMap) Set(key string, value *Plugin) {
+	m.Store(key, value)
+}
+
+func (m *PluginsMap) ToNormalMap(c map[string]*Plugin) {
+	// Clear the old values in the output map
+	for k := range c {
+		delete(c, k)
+	}
+
+	// Insert all values from the PluginsMap to the output map
+	m.Callback(func(key string, value *Plugin) bool {
+		c[key] = value
+		return true
+	})
+}
+
+func (m *PluginsMap) ToNewMap() map[string]*Plugin {
+	result := make(map[string]*Plugin)
+	m.Range(func(k, v any) bool {
+		result[k.(string)] = v.(*Plugin)
+		return true
+	})
+	return result
+}
+
+func (m *PluginsMap) Callback(f func(key string, value *Plugin) bool) {
+	m.Range(func(k, v any) bool {
+		return f(k.(string), v.(*Plugin))
+	})
+}
+
+func (m *PluginsMap) Clear() {
+	m.Range(func(key, value any) bool {
+		m.Delete(key.(string))
+		return true
+	})
+}
+
+func FromNormalPluginsMap(m *PluginsMap, c map[string]*Plugin) *PluginsMap {
+	if m == nil {
+		m = NewPluginsMap()
+	} else {
+		m.Clear()
+	}
+
+	for k, v := range c {
+		m.Set(k, v)
+	}
+
+	return m
+}
+
+func FromPluginsMap(m *PluginsMap, c *PluginsMap) *PluginsMap {
+	if m == nil {
+		m = NewPluginsMap()
+	} else {
+		m.Clear()
+	}
+
+	if c != nil {
+		c.Callback(func(key string, value *Plugin) bool {
+			m.Set(key, value)
+			return true
+		})
+	}
+
+	return m
+}
+
+type GrantsMap struct {
+	*sync.Map
+}
+
+func NewGrantsMap() *GrantsMap {
+	s := new(sync.Map)
+	m := &GrantsMap{Map: s}
+	return m
+}
+
+func (m *GrantsMap) Get(key string) *Grant {
+	if v, ok := m.Load(key); ok {
+		return v.(*Grant)
+	}
+	return nil
+}
+
+func (m *GrantsMap) CheckAndGet(key string) (*Grant, bool) {
+	v, ok := m.Load(key)
+	if ok {
+		return v.(*Grant), true
+	}
+	return nil, false
+}
+
+func (m *GrantsMap) Set(key string, value *Grant) {
+	m.Store(key, value)
+}
+
+func (m *GrantsMap) ToNormalMap(c map[string]*Grant) {
+	// Clear the old values in the output map
+	for k := range c {
+		delete(c, k)
+	}
+
+	// Insert all values from the GrantsMap to the output map
+	m.Callback(func(key string, value *Grant) bool {
+		c[key] = value
+		return true
+	})
+}
+
+func (m *GrantsMap) ToNewMap() map[string]*Grant {
+	result := make(map[string]*Grant)
+	m.Range(func(k, v any) bool {
+		result[k.(string)] = v.(*Grant)
+		return true
+	})
+	return result
+}
+
+func (m *GrantsMap) Callback(f func(key string, value *Grant) bool) {
+	m.Range(func(k, v any) bool {
+		return f(k.(string), v.(*Grant))
+	})
+}
+
+func (m *GrantsMap) Clear() {
+	m.Range(func(key, value any) bool {
+		m.Delete(key.(string))
+		return true
+	})
+}
+
+func FromNormalGrantsMap(m *GrantsMap, c map[string]*Grant) *GrantsMap {
+	if m == nil {
+		m = NewGrantsMap()
+	} else {
+		m.Clear()
+	}
+
+	for k, v := range c {
+		m.Set(k, v)
+	}
+
+	return m
+}
+
+func FromGrantsMap(m *GrantsMap, c *GrantsMap) *GrantsMap {
+	if m == nil {
+		m = NewGrantsMap()
+	} else {
+		m.Clear()
+	}
+
+	if c != nil {
+		c.Callback(func(key string, value *Grant) bool {
+			m.Set(key, value)
+			return true
+		})
+	}
+
+	return m
+}
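`map.go` hand-writes the same `sync.Map` wrapper four times, for `PFSQuery`, `Plugin`, `Grant`, and `Table`. On Go 1.18+ a single generic wrapper could cover all four; the sketch below is a design alternative, not something this patch implements:

```go
package main

import (
	"fmt"
	"sync"
)

// SyncMap is a generic stand-in for the four hand-written wrappers
// (PFSQueriesMap, PluginsMap, GrantsMap, TablesMap).
type SyncMap[K comparable, V any] struct {
	m sync.Map
}

func (s *SyncMap[K, V]) Get(key K) (V, bool) {
	v, ok := s.m.Load(key)
	if !ok {
		var zero V
		return zero, false
	}
	return v.(V), true
}

func (s *SyncMap[K, V]) Set(key K, value V) { s.m.Store(key, value) }

func (s *SyncMap[K, V]) ToNewMap() map[K]V {
	out := make(map[K]V)
	s.m.Range(func(k, v any) bool {
		out[k.(K)] = v.(V)
		return true
	})
	return out
}

func main() {
	var grants SyncMap[string, bool]
	grants.Set("cluster-show-backups", true)
	if v, ok := grants.Get("cluster-show-backups"); ok {
		fmt.Println("grant:", v)
	}
	fmt.Println(grants.ToNewMap())
}
```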
+type TablesMap struct {
+	*sync.Map
+}
+
+func (m *TablesMap) Get(key string) *Table {
+	if v, ok := m.Load(key); ok {
+		return v.(*Table)
+	}
+	return nil
+}
+
+func (m *TablesMap) CheckAndGet(key string) (*Table, bool) {
+	v, ok := m.Load(key)
+	if ok {
+		return v.(*Table), true
+	}
+	return nil, false
+}
+
+func (m *TablesMap) ToNormalMap(c map[string]*Table) {
+	// Clear the old values in the caller's map; reassigning c here would
+	// only rebind the local parameter and leave the caller's map untouched
+	for k := range c {
+		delete(c, k)
+	}
+
+	// Insert all values to the output map
+	m.Range(func(k any, v any) bool {
+		c[k.(string)] = v.(*Table)
+		return true
+	})
+}
+
+func (m *TablesMap) ToNewMap() map[string]*Table {
+	// Build a fresh output map
+	c := make(map[string]*Table)
+
+	// Insert all values to new map
+	m.Range(func(k any, v any) bool {
+		c[k.(string)] = v.(*Table)
+		return true
+	})
+
+	return c
+}
+
+func (m *TablesMap) Set(k string, v *Table) {
+	m.Store(k, v)
+}
+
+func FromNormalTablesMap(m *TablesMap, c map[string]*Table) *TablesMap {
+	if m == nil {
+		m = NewTablesMap()
+	} else {
+		m.Clear()
+	}
+
+	for k, v := range c {
+		m.Store(k, v)
+	}
+
+	return m
+}
+
+func FromTablesSyncMap(m *TablesMap, c *TablesMap) *TablesMap {
+	if m == nil {
+		m = NewTablesMap()
+	} else {
+		m.Clear()
+	}
+
+	if c != nil {
+		c.Range(func(k any, v any) bool {
+			m.Store(k.(string), v.(*Table))
+			return true
+		})
+	}
+
+	return m
+}
+
+func (m *TablesMap) Callback(f func(key, value any) bool) {
+	m.Range(f)
+}
+
+func (m *TablesMap) Clear() {
+	m.Range(func(key any, value any) bool {
+		k := key.(string)
+		m.Delete(k)
+		return true
+	})
+}
+
+func NewTablesMap() *TablesMap {
+	s := new(sync.Map)
+	m := &TablesMap{Map: s}
+	return m
+}
diff --git a/utils/s18log/httplog.go b/utils/s18log/httplog.go
index 707f5966a..8aa1dfc56 100644
--- a/utils/s18log/httplog.go
+++ b/utils/s18log/httplog.go
@@ -28,6 +28,7 @@ type HttpMessage struct {
 	Level     string `json:"level"`
 	Timestamp string `json:"timestamp"`
 	Text      string `json:"text"`
+	Module    int    `json:"module"`
 }
 
 func NewHttpLog(sz int) HttpLog {
@@ -55,3 +56,16 @@ func (tl *HttpLog) Shift(e HttpMessage) {
 	ns[0] = e
 	tl.Buffer = append(ns, tl.Buffer[0:tl.Len]...)
 }
+
+func FromLogrusLevel(level uint32) string {
+	switch level {
+	case 5:
+		return "DEBUG"
+	case 4:
+		return "INFO"
+	case 3:
+		return "WARN"
+	default:
+		return "ERROR"
+	}
+}
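`FromLogrusLevel` maps logrus's numeric levels (DebugLevel=5, InfoLevel=4, WarnLevel=3) to the dashboard's string levels; any other value — PanicLevel (0), FatalLevel (1), ErrorLevel (2), and notably TraceLevel (6) — falls through to ERROR, which may or may not be intended for trace output. A standalone check of the mapping (the local `fromLogrusLevel` simply mirrors the function above):

```go
package main

import "fmt"

// fromLogrusLevel mirrors s18log.FromLogrusLevel: 5→DEBUG, 4→INFO,
// 3→WARN, everything else (including Trace=6) →ERROR.
func fromLogrusLevel(level uint32) string {
	switch level {
	case 5:
		return "DEBUG"
	case 4:
		return "INFO"
	case 3:
		return "WARN"
	default:
		return "ERROR"
	}
}

func main() {
	for _, l := range []uint32{5, 4, 3, 2, 6} {
		fmt.Println(l, "->", fromLogrusLevel(l))
	}
}
```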