 import unittest
-from os import path, listdir
+import os
 import six
+from time import sleep
 from helpers.ptrack_helpers import ProbackupTest, ProbackupException
 from testgres import stop_all
 
@@ -9,6 +10,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
 
     def __init__(self, *args, **kwargs):
         super(BackupTest, self).__init__(*args, **kwargs)
+        self.module_name = 'backup'
 
     @classmethod
     def tearDownClass(cls):
@@ -20,132 +22,198 @@ def tearDownClass(cls):
     def test_backup_modes_archive(self):
         """standard backup modes with ARCHIVE WAL method"""
         fname = self.id().split('.')[3]
-        node = self.make_simple_node(base_dir="tmp_dirs/backup/{0}".format(fname),
-            set_archiving=True,
+        node = self.make_simple_node(base_dir="{0}/{1}/node".format(self.module_name, fname),
             initdb_params=['--data-checksums'],
             pg_options={'wal_level': 'replica', 'ptrack_enable': 'on'}
             )
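+        # Set up a per-test backup catalog: initialize it, register this node under
+        # the name 'node' and turn on WAL archiving for it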
+        backup_dir = os.path.join(self.tmp_path, self.module_name, fname, 'backup')
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
         node.start()
-        self.assertEqual(self.init_pb(node), six.b(""))
 
         # full backup mode
         #with open(path.join(node.logs_dir, "backup_full.log"), "wb") as backup_log:
-        #    backup_log.write(self.backup_pb(node, options=["--verbose"]))
+        #    backup_log.write(self.backup_node(node, options=["--verbose"]))
 
-        self.backup_pb(node)
+        backup_id = self.backup_node(backup_dir, 'node', node)
+        show_backup = self.show_pb(backup_dir, 'node')[0]
 
-        show_backup = self.show_pb(node)[0]
-        full_backup_id = show_backup['ID']
         self.assertEqual(show_backup['Status'], six.b("OK"))
         self.assertEqual(show_backup['Mode'], six.b("FULL"))
 
         # postmaster.pid and postmaster.opts shouldn't be copied
         excluded = True
-        backups_dir = path.join(self.backup_dir(node), "backups")
-        for backup in listdir(backups_dir):
-            db_dir = path.join(backups_dir, backup, "database")
-            for f in listdir(db_dir):
-                if path.isfile(path.join(db_dir, f)) and \
-                        (f == "postmaster.pid" or f == "postmaster.opts"):
+        db_dir = os.path.join(backup_dir, "backups", 'node', backup_id, "database")
+        for f in os.listdir(db_dir):
+            if os.path.isfile(os.path.join(db_dir, f)) \
+                    and (f == "postmaster.pid" or f == "postmaster.opts"):
                 excluded = False
         self.assertEqual(excluded, True)
 
         # page backup mode
-        self.backup_pb(node, backup_type="page")
+        page_backup_id = self.backup_node(backup_dir, 'node', node, backup_type="page")
 
         # print self.show_pb(node)
-        show_backup = self.show_pb(node)[1]
+        show_backup = self.show_pb(backup_dir, 'node')[1]
         self.assertEqual(show_backup['Status'], six.b("OK"))
         self.assertEqual(show_backup['Mode'], six.b("PAGE"))
 
         # Check parent backup
         self.assertEqual(
-            full_backup_id,
-            self.show_pb(node, id=show_backup['ID'])["parent-backup-id"])
+            backup_id,
+            self.show_pb(backup_dir, 'node', backup_id=show_backup['ID'])["parent-backup-id"])
 
         # ptrack backup mode
-        self.backup_pb(node, backup_type="ptrack")
+        self.backup_node(backup_dir, 'node', node, backup_type="ptrack")
 
-        show_backup = self.show_pb(node)[2]
+        show_backup = self.show_pb(backup_dir, 'node')[2]
         self.assertEqual(show_backup['Status'], six.b("OK"))
         self.assertEqual(show_backup['Mode'], six.b("PTRACK"))
 
+        # Check parent backup
+        self.assertEqual(
+            page_backup_id,
+            self.show_pb(backup_dir, 'node', backup_id=show_backup['ID'])["parent-backup-id"])
+
         node.stop()
 
+    # @unittest.skip("skip")
     def test_smooth_checkpoint(self):
         """full backup with smooth checkpoint"""
         fname = self.id().split('.')[3]
-        node = self.make_simple_node(base_dir="tmp_dirs/backup/{0}".format(fname),
-            set_archiving=True,
+        node = self.make_simple_node(base_dir="{0}/{1}/node".format(self.module_name, fname),
             initdb_params=['--data-checksums'],
             pg_options={'wal_level': 'replica'}
             )
+        backup_dir = os.path.join(self.tmp_path, self.module_name, fname, 'backup')
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.start()
+
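+        # "-C" requests a smooth (spread) checkpoint at backup start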
+        self.backup_node(backup_dir, 'node', node, options=["-C"])
+        self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], six.b("OK"))
+        node.stop()
+
+    #@unittest.skip("skip")
+    def test_incremental_backup_without_full(self):
+        """page-level backup without validated full backup"""
+        fname = self.id().split('.')[3]
+        node = self.make_simple_node(base_dir="{0}/{1}/node".format(self.module_name, fname),
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica', 'ptrack_enable': 'on'}
+            )
+        backup_dir = os.path.join(self.tmp_path, self.module_name, fname, 'backup')
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
         node.start()
-        self.assertEqual(self.init_pb(node), six.b(""))
 
-        self.backup_pb(node, options=["-C"])
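+        # A PAGE backup with no valid FULL backup to serve as its parent must be rejected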
+        try:
+            self.backup_node(backup_dir, 'node', node, backup_type="page")
+            # we should die here because exception is what we expect to happen
+            self.assertEqual(1, 0, "Expecting Error because page backup should not be possible without valid full backup.\n Output: {0} \n CMD: {1}".format(
+                repr(self.output), self.cmd))
+        except ProbackupException, e:
+            self.assertEqual(e.message,
+                'ERROR: Valid backup on current timeline is not found. Create new FULL backup before an incremental one.\n',
+                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
 
-        self.assertEqual(self.show_pb(node)[0]['Status'], six.b("OK"))
+        sleep(1)
 
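+        # The same must hold for a PTRACK backup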
+        try:
+            self.backup_node(backup_dir, 'node', node, backup_type="ptrack")
+            # we should die here because exception is what we expect to happen
+            self.assertEqual(1, 0, "Expecting Error because page backup should not be possible without valid full backup.\n Output: {0} \n CMD: {1}".format(
+                repr(self.output), self.cmd))
+        except ProbackupException, e:
+            self.assertEqual(e.message,
+                'ERROR: Valid backup on current timeline is not found. Create new FULL backup before an incremental one.\n',
+                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
+
+        self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], six.b("ERROR"))
         node.stop()
 
-    def test_page_backup_without_full(self):
-        """page-level backup without validated full backup"""
+    @unittest.expectedFailure
+    # Need to forcibly validate parent
+    def test_incremental_backup_corrupt_full(self):
+        """page-level backup with corrupted full backup"""
         fname = self.id().split('.')[3]
-        node = self.make_simple_node(base_dir="tmp_dirs/backup/{0}".format(fname),
-            set_archiving=True,
+        node = self.make_simple_node(base_dir="{0}/{1}/node".format(self.module_name, fname),
             initdb_params=['--data-checksums'],
-            pg_options={'wal_level': 'replica'}
+            pg_options={'wal_level': 'replica', 'ptrack_enable': 'on'}
             )
+        backup_dir = os.path.join(self.tmp_path, self.module_name, fname, 'backup')
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
         node.start()
-        self.assertEqual(self.init_pb(node), six.b(""))
+
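+        # Take a FULL backup and corrupt it by removing a file from its data directory,
+        # so that incremental backups no longer have a valid parent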
+        backup_id = self.backup_node(backup_dir, 'node', node)
+        file = os.path.join(backup_dir, "backups", "node", backup_id.decode("utf-8"), "database", "postgresql.conf")
+        os.remove(file)
 
         try:
-            self.backup_pb(node, backup_type="page")
+            self.backup_node(backup_dir, 'node', node, backup_type="page")
+            # we should die here because exception is what we expect to happen
+            self.assertEqual(1, 0, "Expecting Error because page backup should not be possible without valid full backup.\n Output: {0} \n CMD: {1}".format(
+                repr(self.output), self.cmd))
+        except ProbackupException, e:
+            self.assertEqual(e.message,
+                'ERROR: Valid backup on current timeline is not found. Create new FULL backup before an incremental one.\n',
+                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
+
+            sleep(1)
+            self.assertEqual(1, 0, "Expecting Error because page backup should not be possible without valid full backup.\n Output: {0} \n CMD: {1}".format(
+                repr(self.output), self.cmd))
         except ProbackupException, e:
-            pass
-        self.assertEqual(self.show_pb(node)[0]['Status'], six.b("ERROR"))
+            self.assertEqual(e.message,
+                'ERROR: Valid backup on current timeline is not found. Create new FULL backup before an incremental one.\n',
+                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
 
+        self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], six.b("ERROR"))
         node.stop()
 
+    # @unittest.skip("skip")
     def test_ptrack_threads(self):
         """ptrack multi thread backup mode"""
         fname = self.id().split('.')[3]
-        node = self.make_simple_node(base_dir="tmp_dirs/backup/{0}".format(fname),
-            set_archiving=True,
+        node = self.make_simple_node(base_dir="{0}/{1}/node".format(self.module_name, fname),
             initdb_params=['--data-checksums'],
-            pg_options={'wal_level': 'replica', "ptrack_enable": "on", 'max_wal_senders': '2'}
+            pg_options={'wal_level': 'replica', 'ptrack_enable': 'on'}
             )
+        backup_dir = os.path.join(self.tmp_path, self.module_name, fname, 'backup')
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
         node.start()
-        self.assertEqual(self.init_pb(node), six.b(""))
-
-        self.backup_pb(node, backup_type="full", options=["-j", "4"])
-
-        self.assertEqual(self.show_pb(node)[0]['Status'], six.b("OK"))
 
-        with open(path.join(node.logs_dir, "backup_ptrack.log"), "wb") as backup_log:
-            backup_log.write(self.backup_pb(node, backup_type="ptrack", options=["-j", "4"]))
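+        # "-j 4" runs the backup with four parallel threads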
+        self.backup_node(backup_dir, 'node', node, backup_type="full", options=["-j", "4"])
+        self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], six.b("OK"))
 
-        self.assertEqual(self.show_pb(node)[0]['Status'], six.b("OK"))
+        self.backup_node(backup_dir, 'node', node, backup_type="ptrack", options=["-j", "4"])
+        self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], six.b("OK"))
 
         node.stop()
 
+    # @unittest.skip("skip")
     def test_ptrack_threads_stream(self):
         """ptrack multi thread backup mode and stream"""
         fname = self.id().split('.')[3]
-        node = self.make_simple_node(base_dir="tmp_dirs/backup/{0}".format(fname),
+        node = self.make_simple_node(base_dir="{0}/{1}/node".format(self.module_name, fname),
             set_replication=True,
             initdb_params=['--data-checksums'],
             pg_options={'wal_level': 'replica', 'ptrack_enable': 'on', 'max_wal_senders': '2'}
             )
+        backup_dir = os.path.join(self.tmp_path, self.module_name, fname, 'backup')
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
         node.start()
-        self.assertEqual(self.init_pb(node), six.b(""))
-
-        self.backup_pb(node, backup_type="full", options=["-j", "4", "--stream"])
-
-        self.assertEqual(self.show_pb(node)[0]['Status'], six.b("OK"))
 
-        self.backup_pb(node, backup_type="ptrack", options=["-j", "4", "--stream"])
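+        # "--stream" pulls the needed WAL over a replication connection and includes it
+        # in the backup, so this test registers the instance without setting up archiving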
+        self.backup_node(backup_dir, 'node', node, backup_type="full", options=["-j", "4", "--stream"])
 
-        self.assertEqual(self.show_pb(node)[1]['Status'], six.b("OK"))
+        self.assertEqual(self.show_pb(backup_dir, 'node')[0]['Status'], six.b("OK"))
+        self.backup_node(backup_dir, 'node', node, backup_type="ptrack", options=["-j", "4", "--stream"])
+        self.assertEqual(self.show_pb(backup_dir, 'node')[1]['Status'], six.b("OK"))
         node.stop()