Skip to content

Commit 980ad1f

Browse files
committed
Merge branch 'master' of github.com:Altinity/clickhouse-backup into bug1291
2 parents 53655e9 + 1f702e8 commit 980ad1f

File tree

28 files changed

+296
-252
lines changed

28 files changed

+296
-252
lines changed

.gitignore

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,18 +1,19 @@
1-
.vscode
1+
.vscode/
22
*.exe
33
debug
44
/clickhouse-backup
55
credentials.json
6-
.idea
6+
.idea/
77
*.iml
88
build/
99
.env
1010
*.log
11+
vendor/
12+
.aider*
13+
AGENTS.md
1114

1215
# TestFlows tests artifacts that shall not be stored in Git
1316
_instances/
1417
_coverage_/
1518
__pycache__/
16-
*.py[cod]
17-
vendor/
18-
.aider*
19+
*.py[cod]

pkg/backup/backup_shard.go

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2,10 +2,11 @@ package backup
22

33
import (
44
"context"
5-
"errors"
65
"fmt"
7-
"github.com/rs/zerolog/log"
86
"hash/fnv"
7+
8+
"github.com/pkg/errors"
9+
"github.com/rs/zerolog/log"
910
)
1011

1112
var (
@@ -170,7 +171,7 @@ func (rd *replicaDeterminer) getReplicaState(ctx context.Context) ([]tableReplic
170171
// TODO: Change query to pull replica_is_active after upgrading to clickhouse-go v2
171172
query := "SELECT t.database, t.name AS table, r.replica_name, arraySort(mapKeys(mapFilter((replica, active) -> (active == 1), r.replica_is_active))) AS active_replicas FROM system.tables t LEFT JOIN system.replicas r ON t.database = r.database AND t.name = r.table"
172173
if err := rd.q.SelectContext(ctx, &md, query); err != nil {
173-
return nil, fmt.Errorf("could not determine replication state: %w", err)
174+
return nil, errors.Wrap(err, "could not determine replication state")
174175
}
175176

176177
// Handle views and memory tables by putting in stand-in replication metadata

pkg/backup/backuper.go

Lines changed: 11 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -4,18 +4,19 @@ import (
44
"context"
55
"crypto/sha256"
66
"encoding/binary"
7-
"errors"
87
"fmt"
9-
"github.com/Altinity/clickhouse-backup/v2/pkg/common"
10-
"github.com/Altinity/clickhouse-backup/v2/pkg/metadata"
11-
"github.com/Altinity/clickhouse-backup/v2/pkg/utils"
12-
"github.com/eapache/go-resiliency/retrier"
138
"net/url"
149
"os"
1510
"path"
1611
"regexp"
1712
"strings"
1813

14+
"github.com/Altinity/clickhouse-backup/v2/pkg/common"
15+
"github.com/Altinity/clickhouse-backup/v2/pkg/metadata"
16+
"github.com/Altinity/clickhouse-backup/v2/pkg/utils"
17+
"github.com/eapache/go-resiliency/retrier"
18+
"github.com/pkg/errors"
19+
1920
"github.com/Altinity/clickhouse-backup/v2/pkg/clickhouse"
2021
"github.com/Altinity/clickhouse-backup/v2/pkg/config"
2122
"github.com/Altinity/clickhouse-backup/v2/pkg/resumable"
@@ -117,7 +118,7 @@ func (b *Backuper) initDisksPathsAndBackupDestination(ctx context.Context, disks
117118
return err
118119
}
119120
if err := b.dst.Connect(ctx); err != nil {
120-
return fmt.Errorf("can't connect to %s: %v", b.dst.Kind(), err)
121+
return errors.Wrapf(err, "can't connect to %s", b.dst.Kind())
121122
}
122123
}
123124
return nil
@@ -166,7 +167,7 @@ func (b *Backuper) populateBackupShardField(ctx context.Context, tables []clickh
166167
// Parse shard config here to avoid error return in NewBackuper
167168
shardFunc, err := shardFuncByName(b.cfg.General.ShardedOperationMode)
168169
if err != nil {
169-
return fmt.Errorf("could not determine shards for tables: %w", err)
170+
return errors.Wrap(err, "could not determine shards for tables")
170171
}
171172
b.bs = newReplicaDeterminer(b.ch, shardFunc)
172173
}
@@ -417,7 +418,7 @@ func (b *Backuper) getTablesDiffFromRemote(ctx context.Context, diffFromRemote s
417418
tablesForUploadFromDiff = make(map[metadata.TableTitle]metadata.TableMetadata)
418419
backupList, err := b.dst.BackupList(ctx, true, diffFromRemote)
419420
if err != nil {
420-
return nil, fmt.Errorf("b.dst.BackupList return error: %v", err)
421+
return nil, errors.Wrap(err, "b.dst.BackupList return error")
421422
}
422423
var diffRemoteMetadata *metadata.BackupMetadata
423424
for _, backup := range backupList {
@@ -433,7 +434,7 @@ func (b *Backuper) getTablesDiffFromRemote(ctx context.Context, diffFromRemote s
433434
if len(diffRemoteMetadata.Tables) != 0 {
434435
diffTablesList, tableListErr := getTableListByPatternRemote(ctx, b, diffRemoteMetadata, tablePattern, false)
435436
if tableListErr != nil {
436-
return nil, fmt.Errorf("getTableListByPatternRemote return error: %v", tableListErr)
437+
return nil, errors.Wrap(tableListErr, "getTableListByPatternRemote return error")
437438
}
438439
for _, t := range diffTablesList {
439440
tablesForUploadFromDiff[metadata.TableTitle{
@@ -537,7 +538,7 @@ func (b *Backuper) calculateChecksum(disk *clickhouse.Disk, partName string) (ui
537538
checksumsFilePath := path.Join(disk.Path, partName, "checksums.txt")
538539
content, err := os.ReadFile(checksumsFilePath)
539540
if err != nil {
540-
return 0, fmt.Errorf("could not read %s: %w", checksumsFilePath, err)
541+
return 0, errors.Wrapf(err, "could not read %s", checksumsFilePath)
541542
}
542543

543544
hash := sha256.Sum256(content)

pkg/backup/create.go

Lines changed: 20 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@ package backup
33
import (
44
"context"
55
"encoding/json"
6-
"errors"
76
"fmt"
87
"os"
98
"path"
@@ -17,6 +16,7 @@ import (
1716
"github.com/Altinity/clickhouse-backup/v2/pkg/pidlock"
1817
"github.com/Altinity/clickhouse-backup/v2/pkg/resumable"
1918
"github.com/eapache/go-resiliency/retrier"
19+
"github.com/pkg/errors"
2020

2121
"github.com/google/uuid"
2222
recursiveCopy "github.com/otiai10/copy"
@@ -78,7 +78,7 @@ func (b *Backuper) CreateBackup(backupName, diffFromRemote, tablePattern string,
7878
backupName = utils.CleanBackupNameRE.ReplaceAllString(backupName, "")
7979

8080
if err := b.ch.Connect(); err != nil {
81-
return fmt.Errorf("can't connect to clickhouse: %v", err)
81+
return errors.Wrap(err, "can't connect to clickhouse")
8282
}
8383
defer b.ch.Close()
8484

@@ -106,11 +106,11 @@ func (b *Backuper) CreateBackup(backupName, diffFromRemote, tablePattern string,
106106

107107
allDatabases, err := b.ch.GetDatabases(ctx, b.cfg, tablePattern)
108108
if err != nil {
109-
return fmt.Errorf("can't get database engines from clickhouse: %v", err)
109+
return errors.Wrap(err, "can't get database engines from clickhouse")
110110
}
111111
tables, err := b.GetTables(ctx, tablePattern)
112112
if err != nil {
113-
return fmt.Errorf("can't get tables from clickhouse: %v", err)
113+
return errors.Wrap(err, "can't get tables from clickhouse")
114114
}
115115

116116
if b.CalculateNonSkipTables(tables) == 0 && !b.cfg.General.AllowEmptyBackups {
@@ -119,7 +119,7 @@ func (b *Backuper) CreateBackup(backupName, diffFromRemote, tablePattern string,
119119

120120
allFunctions, err := b.ch.GetUserDefinedFunctions(ctx)
121121
if err != nil {
122-
return fmt.Errorf("GetUserDefinedFunctions return error: %v", err)
122+
return errors.Wrap(err, "GetUserDefinedFunctions return error")
123123
}
124124

125125
disks, err := b.ch.GetDisks(ctx, false)
@@ -274,7 +274,7 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName, diffFromRe
274274
return err
275275
}
276276
if err = b.dst.Connect(ctx); err != nil {
277-
return fmt.Errorf("can't connect to %s: %v", b.dst.Kind(), err)
277+
return errors.Wrapf(err, "can't connect to %s", b.dst.Kind())
278278
}
279279
defer func() {
280280
if closeErr := b.dst.Close(ctx); closeErr != nil {
@@ -297,7 +297,7 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName, diffFromRe
297297
var diffFromRemoteErr error
298298
tablesDiffFromRemote, diffFromRemoteErr = b.getTablesDiffFromRemote(ctx, diffFromRemote, tablePattern)
299299
if diffFromRemoteErr != nil {
300-
return fmt.Errorf("b.getTablesDiffFromRemote return error: %v", diffFromRemoteErr)
300+
return errors.Wrap(diffFromRemoteErr, "b.getTablesDiffFromRemote return error")
301301
}
302302
}
303303

@@ -378,12 +378,12 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName, diffFromRe
378378
})
379379
}
380380
if wgWaitErr := createBackupWorkingGroup.Wait(); wgWaitErr != nil {
381-
return fmt.Errorf("one of createBackupLocal go-routine return error: %v", wgWaitErr)
381+
return errors.Wrap(wgWaitErr, "one of createBackupLocal go-routine return error")
382382
}
383383

384384
backupMetaFile := path.Join(b.DefaultDataPath, "backup", backupName, "metadata.json")
385385
if err := b.createBackupMetadata(ctx, backupMetaFile, backupName, diffFromRemote, backupVersion, "regular", diskMap, diskTypes, disks, backupDataSize, backupObjectDiskSize, backupMetadataSize, backupRBACSize, backupConfigSize, backupNamedCollectionsSize, tableMetas, allDatabases, allFunctions); err != nil {
386-
return fmt.Errorf("createBackupMetadata return error: %v", err)
386+
return errors.Wrap(err, "createBackupMetadata return error")
387387
}
388388
log.Info().Str("version", backupVersion).Str("operation", "createBackupLocal").Str("duration", utils.HumanizeDuration(time.Since(startBackup))).Msg("done")
389389
return nil
@@ -429,7 +429,7 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, baseBac
429429
}
430430
backupResult := make([]clickhouse.SystemBackups, 0)
431431
if err := b.ch.SelectContext(ctx, &backupResult, backupSQL); err != nil {
432-
return fmt.Errorf("backup error: %v", err)
432+
return errors.Wrap(err, "backup error")
433433
}
434434
if len(backupResult) != 1 || (backupResult[0].Status != "BACKUP_COMPLETE" && backupResult[0].Status != "BACKUP_CREATED") {
435435
return fmt.Errorf("backup return wrong results: %+v", backupResult)
@@ -474,7 +474,7 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, baseBac
474474
return err
475475
}
476476
if err = b.dst.Connect(ctx); err != nil {
477-
return fmt.Errorf("createBackupEmbedded: can't connect to %s: %v", b.dst.Kind(), err)
477+
return errors.Wrapf(err, "createBackupEmbedded: can't connect to %s", b.dst.Kind())
478478
}
479479
defer func() {
480480
if closeErr := b.dst.Close(ctx); closeErr != nil {
@@ -721,11 +721,11 @@ func (b *Backuper) createBackupRBAC(ctx context.Context, backupPath string, disk
721721
rbacBackup := path.Join(backupPath, "access")
722722
replicatedRBACDataSize, err := b.createBackupRBACReplicated(ctx, rbacBackup)
723723
if err != nil {
724-
return 0, fmt.Errorf("b.createBackupRBACReplicated error: %v", err)
724+
return 0, errors.Wrap(err, "b.createBackupRBACReplicated error")
725725
}
726726
accessPath, err := b.ch.GetAccessManagementPath(ctx, disks)
727727
if err != nil {
728-
return 0, fmt.Errorf("b.ch.GetAccessManagementPath error: %v", err)
728+
return 0, errors.Wrap(err, "b.ch.GetAccessManagementPath error")
729729
}
730730
accessPathInfo, err := os.Stat(accessPath)
731731
if err != nil && !os.IsNotExist(err) {
@@ -851,7 +851,7 @@ func (b *Backuper) createBackupRBACReplicated(ctx context.Context, rbacBackup st
851851
for _, userDirectory := range replicatedRBAC {
852852
replicatedAccessPath, err := k.GetReplicatedAccessPath(userDirectory.Name)
853853
if err != nil {
854-
return 0, fmt.Errorf("k.GetReplicatedAccessPath(%s) error: %v", userDirectory.Name, err)
854+
return 0, errors.Wrapf(err, "k.GetReplicatedAccessPath(%s) error", userDirectory.Name)
855855
}
856856
rbacUUIDObjectsCount, err := k.ChildCount(replicatedAccessPath, "uuid")
857857
if err != nil {
@@ -1063,7 +1063,7 @@ func (b *Backuper) uploadObjectDiskParts(ctx context.Context, backupName string,
10631063
return nil
10641064
})
10651065
if copyObjectErr != nil {
1066-
return fmt.Errorf("b.dst.CopyObject in %s error: %v", backupShadowPath, copyObjectErr)
1066+
return errors.Wrapf(copyObjectErr, "b.dst.CopyObject in %s error", backupShadowPath)
10671067
}
10681068
} else {
10691069
if !isCopyFailed.Load() {
@@ -1079,7 +1079,7 @@ func (b *Backuper) uploadObjectDiskParts(ctx context.Context, backupName string,
10791079
return object_disk.CopyObjectStreaming(uploadCtx, srcDiskConnection.GetRemoteStorage(), b.dst, srcKey, path.Join(objectDiskPath, dstKey))
10801080
})
10811081
if copyObjectErr != nil {
1082-
return fmt.Errorf("object_disk.CopyObjectStreaming in %s error: %v", backupShadowPath, copyObjectErr)
1082+
return errors.Wrapf(copyObjectErr, "object_disk.CopyObjectStreaming in %s error", backupShadowPath)
10831083
}
10841084
}
10851085
objSize = storageObject.ObjectSize
@@ -1103,7 +1103,7 @@ func (b *Backuper) uploadObjectDiskParts(ctx context.Context, backupName string,
11031103
}
11041104

11051105
if wgWaitErr := uploadObjectDiskPartsWorkingGroup.Wait(); wgWaitErr != nil {
1106-
return 0, fmt.Errorf("one of uploadObjectDiskParts go-routine return error: %v", wgWaitErr)
1106+
return 0, errors.Wrap(wgWaitErr, "one of uploadObjectDiskParts go-routine return error")
11071107
}
11081108
return size, nil
11091109
}
@@ -1140,7 +1140,7 @@ func (b *Backuper) createBackupMetadata(ctx context.Context, backupMetaFile, bac
11401140
}
11411141
content, err := json.MarshalIndent(&backupMetadata, "", "\t")
11421142
if err != nil {
1143-
return fmt.Errorf("can't marshal backup metafile json: %v", err)
1143+
return errors.Wrap(err, "can't marshal backup metafile json")
11441144
}
11451145
if err := os.WriteFile(backupMetaFile, content, 0640); err != nil {
11461146
return err
@@ -1164,10 +1164,10 @@ func (b *Backuper) createTableMetadata(metadataPath string, table metadata.Table
11641164
metadataFile := path.Join(metadataDatabasePath, fmt.Sprintf("%s.json", common.TablePathEncode(table.Table)))
11651165
metadataBody, err := json.MarshalIndent(&table, "", " ")
11661166
if err != nil {
1167-
return 0, fmt.Errorf("can't marshal %s: %v", MetaFileName, err)
1167+
return 0, errors.Wrapf(err, "can't marshal %s", MetaFileName)
11681168
}
11691169
if err := os.WriteFile(metadataFile, metadataBody, 0644); err != nil {
1170-
return 0, fmt.Errorf("can't create %s: %v", MetaFileName, err)
1170+
return 0, errors.Wrapf(err, "can't create %s", MetaFileName)
11711171
}
11721172
if err := filesystemhelper.Chown(metadataFile, b.ch, disks, false); err != nil {
11731173
return 0, err

pkg/backup/delete.go

Lines changed: 10 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -3,14 +3,15 @@ package backup
33
import (
44
"context"
55
"fmt"
6-
"github.com/Altinity/clickhouse-backup/v2/pkg/pidlock"
76
"io/fs"
87
"os"
98
"path"
109
"path/filepath"
1110
"strings"
1211
"time"
1312

13+
"github.com/Altinity/clickhouse-backup/v2/pkg/pidlock"
14+
1415
"github.com/Altinity/clickhouse-backup/v2/pkg/clickhouse"
1516
"github.com/Altinity/clickhouse-backup/v2/pkg/custom"
1617
"github.com/Altinity/clickhouse-backup/v2/pkg/status"
@@ -25,7 +26,7 @@ import (
2526
// Clean - removed all data in shadow folder
2627
func (b *Backuper) Clean(ctx context.Context) error {
2728
if err := b.ch.Connect(); err != nil {
28-
return fmt.Errorf("can't connect to clickhouse: %v", err)
29+
return errors.Wrap(err, "can't connect to clickhouse")
2930
}
3031
defer b.ch.Close()
3132

@@ -39,7 +40,7 @@ func (b *Backuper) Clean(ctx context.Context) error {
3940
}
4041
shadowDir := path.Join(disk.Path, "shadow")
4142
if err := b.cleanDir(shadowDir); err != nil {
42-
return fmt.Errorf("can't clean '%s': %v", shadowDir, err)
43+
return errors.Wrapf(err, "can't clean '%s'", shadowDir)
4344
}
4445
log.Info().Msg(shadowDir)
4546
}
@@ -117,7 +118,7 @@ func (b *Backuper) RemoveBackupLocal(ctx context.Context, backupName string, dis
117118
start := time.Now()
118119
backupName = utils.CleanBackupNameRE.ReplaceAllString(backupName, "")
119120
if err = b.ch.Connect(); err != nil {
120-
return fmt.Errorf("can't connect to clickhouse: %v", err)
121+
return errors.Wrap(err, "can't connect to clickhouse")
121122
}
122123
defer b.ch.Close()
123124
if disks == nil {
@@ -142,7 +143,7 @@ func (b *Backuper) RemoveBackupLocal(ctx context.Context, backupName string, dis
142143
}
143144
err = bd.Connect(ctx)
144145
if err != nil {
145-
return fmt.Errorf("can't connect to remote storage: %v", err)
146+
return errors.Wrap(err, "can't connect to remote storage")
146147
}
147148
defer func() {
148149
if err := bd.Close(ctx); err != nil {
@@ -279,7 +280,7 @@ func (b *Backuper) RemoveBackupRemote(ctx context.Context, backupName string) er
279280
return custom.DeleteRemote(ctx, b.cfg, backupName)
280281
}
281282
if err := b.ch.Connect(); err != nil {
282-
return fmt.Errorf("can't connect to clickhouse: %v", err)
283+
return errors.Wrap(err, "can't connect to clickhouse")
283284
}
284285
defer b.ch.Close()
285286

@@ -289,7 +290,7 @@ func (b *Backuper) RemoveBackupRemote(ctx context.Context, backupName string) er
289290
}
290291
err = bd.Connect(ctx)
291292
if err != nil {
292-
return fmt.Errorf("can't connect to remote storage: %v", err)
293+
return errors.Wrap(err, "can't connect to remote storage")
293294
}
294295
defer func() {
295296
if err := bd.Close(ctx); err != nil {
@@ -472,14 +473,14 @@ func (b *Backuper) cleanPartialRequiredBackup(ctx context.Context, disks []click
472473
for _, localBackup := range localBackups {
473474
if localBackup.BackupName != currentBackupName && localBackup.DataSize+localBackup.CompressedSize+localBackup.MetadataSize+localBackup.RBACSize == 0 {
474475
if err = b.RemoveBackupLocal(ctx, localBackup.BackupName, disks); err != nil {
475-
return fmt.Errorf("CleanPartialRequiredBackups %s -> RemoveBackupLocal cleaning error: %v", localBackup.BackupName, err)
476+
return errors.Wrapf(err, "CleanPartialRequiredBackups %s -> RemoveBackupLocal cleaning error", localBackup.BackupName)
476477
} else {
477478
log.Info().Msgf("CleanPartialRequiredBackups %s deleted", localBackup.BackupName)
478479
}
479480
}
480481
}
481482
} else {
482-
return fmt.Errorf("CleanPartialRequiredBackups -> GetLocalBackups cleaning error: %v", err)
483+
return errors.Wrap(err, "CleanPartialRequiredBackups -> GetLocalBackups cleaning error")
483484
}
484485
return nil
485486
}

0 commit comments

Comments (0)