@@ -3,7 +3,7 @@ package backup;
33
44import "kvrpcpb.proto" ;
55import "errorpb.proto" ;
6-
6+ import "encryptionpb.proto" ;
77import "gogoproto/gogo.proto" ;
88import "rustproto.proto" ;
99
@@ -19,11 +19,19 @@ message BackupMeta {
1919 // ID and version of backuped cluster.
2020 uint64 cluster_id = 1 ;
2121 string cluster_version = 2 ;
22+ // Save the version of BR running backup jobs.
23+ string br_version = 11 ;
24+ // The backupmeta scheme version.
25+ int32 version = 12 ;
2226
2327 // path field is no longer used.
2428 reserved 3 ; reserved "path" ;
2529 // A set of files that compose a backup.
30+ // Note: `files` is deprecated, as it bloats backupmeta. It is kept for
31+ // compatibility, so new BR can restore older backups.
2632 repeated File files = 4 ;
33+ // An index to files contains data files.
34+ MetaFile file_index = 13 ;
2735
2836 // A pair of timestamp specifies a time range of a backup.
2937 // For full backup, the start_version equals to the end_version,
@@ -33,19 +41,34 @@ message BackupMeta {
3341 uint64 start_version = 5 ;
3442 uint64 end_version = 6 ;
3543
36- // Additional metadata describes database and table info.
44+ // Table metadata describes database and table info.
45+ // Note: `schemas` is deprecated, as it bloats backupmeta. It is kept for
46+ // compatibility, so new BR can restore older backups.
3747 repeated Schema schemas = 7 ;
48+ // An index to files contains Schemas.
49+ MetaFile schema_index = 14 ;
3850
39- // If in raw kv mode, `start_versions`, `end_versions` and `schemas` will be ignored, and the
40- // backup data's range is represented by raw_ranges.
51+ // If in raw kv mode, `start_versions`, `end_versions` and `schemas` will be
52+ // ignored, and the backup data's range is represented by raw_ranges.
4153 bool is_raw_kv = 8 ;
54+ // Note: `raw_ranges` is deprecated, as it bloats backupmeta. It is kept for
55+ // compatibility, so new BR can restore older backups.
4256 repeated RawRange raw_ranges = 9 ;
57+ // An index to files contains RawRanges.
58+ MetaFile raw_range_index = 15 ;
4359
44- // In incremental backup, DDLs which are completed in (lastBackupTS, backupTS] will be stored here.
60+ // In incremental backup, DDLs which are completed in
61+ // (lastBackupTS, backupTS] will be stored here.
62+ // Note: `ddls` is deprecated, as it bloats backupmeta. It is kept for
63+ // compatibility, so new BR can restore older backups.
4564 bytes ddls = 10 ;
65+ // An index to files contains DDLs.
66+ MetaFile ddl_indexes = 16 ;
67+ // The backup result, stored in the `backupmeta` file.
68+ string backup_result = 17 ;
4669
47- // Save the version of BR running backup jobs .
48- string br_version = 11 ;
70+ // API version implies the encoding of the key and value.
71+ kvrpcpb.APIVersion api_version = 18 ;
4972}
5073
5174message File {
@@ -64,6 +87,23 @@ message File {
6487 string cf = 10 ;
6588
6689 uint64 size = 11 ;
90+ // cipher_iv is used for AES cipher
91+ bytes cipher_iv = 12 ;
92+ }
93+
94+ // MetaFile describes a multi-level index of data used in backup.
95+ message MetaFile {
96+ // A set of files that contains a MetaFile.
97+ // It is used as a multi-level index.
98+ repeated File meta_files = 1 ;
99+ // A set of files that contains user data.
100+ repeated File data_files = 2 ;
101+ // A set of files that contains Schemas.
102+ repeated Schema schemas = 3 ;
103+ // A set of files that contains RawRanges.
104+ repeated RawRange raw_ranges = 4 ;
105+ // A set of files that contains DDLs.
106+ repeated bytes ddls = 5 ;
67107}
68108
69109message Schema {
@@ -109,6 +149,11 @@ enum CompressionType {
109149 ZSTD = 3 ;
110150}
111151
152+ message CipherInfo {
153+ encryptionpb.EncryptionMethod cipher_type = 1 ;
154+ bytes cipher_key = 2 ;
155+ }
156+
112157message BackupRequest {
113158 uint64 cluster_id = 1 ;
114159
@@ -135,6 +180,8 @@ message BackupRequest {
135180 CompressionType compression_type = 12 ;
136181 // sst compression level, some algorithms support negative compression levels
137182 int32 compression_level = 13 ;
183+ // The cipher_info is used to encrypt sst
184+ CipherInfo cipher_info = 14 ;
138185}
139186
140187message StorageBackend {
@@ -144,6 +191,8 @@ message StorageBackend {
144191 S3 s3 = 3 ;
145192 GCS gcs = 4 ;
146193 CloudDynamic cloud_dynamic = 5 ;
194+ HDFS hdfs = 6 ;
195+ AzureBlobStorage azure_blob_storage = 7 ;
147196 }
148197}
149198
@@ -188,6 +237,29 @@ message GCS {
188237 string credentials_blob = 6 ;
189238}
190239
240+ // AzureBlobStorage storage backend saves files into azure blob storage.
241+ message AzureBlobStorage {
242+ string endpoint = 1 ;
243+ // Alias: container
244+ string bucket = 2 ;
245+ // Notice: prefix starts without `/`, otherwise the first directory's name is empty.
246+ string prefix = 3 ;
247+ // Alias: access_tier.
248+ // See https://docs.microsoft.com/en-us/azure/storage/blobs/access-tiers-overview
249+ string storage_class = 4 ;
250+
251+ // if empty, try to read account_name from the node's environment variable $AZURE_STORAGE_ACCOUNT.
252+ string account_name = 5 ;
253+ // Use shared key to access the azure blob
254+ // If the node's environment variables($AZURE_CLIENT_ID, $AZURE_TENANT_ID, $AZURE_CLIENT_SECRET) exist,
255+ // prefer to use token to access the azure blob.
256+ //
257+ // See https://docs.microsoft.com/en-us/azure/storage/common/identity-library-acquire-token?toc=/azure/storage/blobs/toc.json
258+ //
259+ // Otherwise, if empty, try to read shared key from the node's environment variable $AZURE_STORAGE_KEY.
260+ string shared_key = 6 ;
261+ }
262+
191263message Bucket {
192264 string endpoint = 1 ;
193265 string region = 3 ;
@@ -199,10 +271,16 @@ message Bucket {
199271// CloudDynamic allows testing new cloud providers and new fields without changing protobuf definitions
200272message CloudDynamic {
201273 Bucket bucket = 1 ;
202- string provider_name = 2 ; // s3 and gcs are supported
274+ string provider_name = 2 ; // s3, gcs and azureBlobStorage are supported
203275 map <string , string > attrs = 3 ;
204276}
205277
278+ // HDFS storage backend saves file into HDFS compatible storages
279+ message HDFS {
280+ // a URL: hdfs:///some/path or hdfs://host:port/some/path
281+ string remote = 1 ;
282+ }
283+
206284
207285message BackupResponse {
208286 Error error = 1 ;
0 commit comments