@@ -46,6 +46,7 @@ def test_pgpro434_1(self):
         self.add_instance(backup_dir, 'node', node)

         # Make backup
+        sleep(5)
         self.backup_node(backup_dir, 'node', node)
         node.cleanup()

@@ -59,8 +60,9 @@ def test_pgpro434_1(self):
         self.del_test_dir(module_name, fname)

     # @unittest.skip("skip")
+    @unittest.expectedFailure
     def test_pgpro434_2(self):
-        """Check that timelines are correct"""
+        """Check that timelines are correct. WAITING PGPRO-1053 for --immediate. replace time"""
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
@@ -81,15 +83,19 @@ def test_pgpro434_2(self):
         recovery_time = self.show_pb(backup_dir, 'node', backup_id)["recovery-time"]
         node.safe_psql(
             "postgres",
-            "insert into t_heap select 100501 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
+            "insert into t_heap select 100501 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,1) i")

         # SECOND TIMELIN
         node.cleanup()
         self.restore_node(backup_dir, 'node', node, options=["--time={0}".format(recovery_time)])
         node.start()
+        while node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
+            sleep(1)
         if self.verbose:
             print('Second timeline')
             print(node.safe_psql("postgres", "select redo_wal_file from pg_control_checkpoint()"))
+        self.assertFalse(node.execute("postgres", "select exists(select 1 from t_heap where id = 100501)")[0][0],
+            'data after restore not equal to original data')
         node.safe_psql(
             "postgres",
             "insert into t_heap select 2 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(100,200) i")
@@ -103,6 +109,8 @@ def test_pgpro434_2(self):
         node.cleanup()
         self.restore_node(backup_dir, 'node', node, options=["--time={0}".format(recovery_time)])
         node.start()
+        while node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
+            sleep(1)
         if self.verbose:
             print('third timeline')
             print(node.safe_psql("postgres", "select redo_wal_file from pg_control_checkpoint()"))
@@ -120,6 +128,8 @@ def test_pgpro434_2(self):
         node.cleanup()
         self.restore_node(backup_dir, 'node', node, options=["--time={0}".format(recovery_time)])
         node.start()
+        while node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
+            sleep(1)
         if self.verbose:
             print('Fourth timeline')
             print(node.safe_psql("postgres", "select redo_wal_file from pg_control_checkpoint()"))
@@ -128,6 +138,8 @@ def test_pgpro434_2(self):
         node.cleanup()
         self.restore_node(backup_dir, 'node', node, options=["--time={0}".format(recovery_time)])
         node.start()
+        while node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
+            sleep(1)
         if self.verbose:
             print('Fifth timeline')
             print(node.safe_psql("postgres", "select redo_wal_file from pg_control_checkpoint()"))
@@ -136,6 +148,8 @@ def test_pgpro434_2(self):
         node.cleanup()
         self.restore_node(backup_dir, 'node', node, options=["--time={0}".format(recovery_time)])
         node.start()
+        while node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
+            sleep(1)
         if self.verbose:
             print('Sixth timeline')
             print(node.safe_psql("postgres", "select redo_wal_file from pg_control_checkpoint()"))
@@ -147,7 +161,7 @@ def test_pgpro434_2(self):
             'data after restore not equal to original data')

         # Clean after yourself
-        # self.del_test_dir(module_name, fname)
+        self.del_test_dir(module_name, fname)

     # @unittest.skip("skip")
     def test_pgpro434_3(self):
@@ -219,7 +233,7 @@ def test_arhive_push_file_exists(self):

         os.remove(file)
         sleep(5)
-        node.safe_psql('postgres', 'select pg_switch_xlog()')
+        node.safe_psql('postgres', 'select pg_switch_wal()')

         with open(log_file, 'r') as f:
             log_content = f.read()
@@ -229,9 +243,10 @@ def test_arhive_push_file_exists(self):
         # Clean after yourself
         self.del_test_dir(module_name, fname)

-    #@unittest.expectedFailure
+    # @unittest.expectedFailure
+    # @unittest.skip("skip")
     def test_replica_archive(self):
-        """make node withput archiving, take stream backup and turn it into replica, set replica with archiving, make archive backup from replica"""
+        """make node without archiving, take stream backup and turn it into replica, set replica with archiving, make archive backup from replica"""
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
@@ -240,6 +255,7 @@ def test_replica_archive(self):
             pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s'}
             )
         self.init_pb(backup_dir)
+        # ADD INSTANCE 'MASTER'
         self.add_instance(backup_dir, 'master', master)
         # force more frequent wal switch
         master.start()
@@ -258,7 +274,7 @@ def test_replica_archive(self):
         self.restore_node(backup_dir, 'master', replica)
         self.set_replica(master, replica, synchronous=True)
         self.set_archiving(backup_dir, 'replica', replica, replica=True)
-        replica.start({"-t": "600"})
+        replica.start()

         # Check data correctness on replica
         after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
@@ -269,6 +285,7 @@ def test_replica_archive(self):
269285 "postgres" ,
270286 "insert into t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(256,512) i" )
271287 before = master .safe_psql ("postgres" , "SELECT * FROM t_heap" )
288+ # ADD INSTANCE 'REPLICA'
272289 self .add_instance (backup_dir , 'replica' , replica )
273290 backup_id = self .backup_node (backup_dir , 'replica' , replica , options = ['--archive-timeout=30' ,
274291 '--master-host=localhost' , '--master-db=postgres' ,'--master-port={0}' .format (master .port )])
@@ -306,3 +323,62 @@ def test_replica_archive(self):

         # Clean after yourself
         self.del_test_dir(module_name, fname)
+
+    # @unittest.expectedFailure
+    # @unittest.skip("skip")
+    def test_master_and_replica_concurrent_archiving(self):
330+ """make node 'master 'with archiving, take archive backup and turn it into replica, set replica with archiving, make archive backup from replica, make archive backup from master"""
+        fname = self.id().split('.')[3]
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s'}
+            )
+        replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
+        replica.cleanup()
+
+        self.init_pb(backup_dir)
+        # ADD INSTANCE 'MASTER'
+        self.add_instance(backup_dir, 'master', master)
+        self.set_archiving(backup_dir, 'master', master)
+        master.start()
+
+        master.psql(
+            "postgres",
+            "create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
+
+        # TAKE FULL ARCHIVE BACKUP FROM MASTER
+        self.backup_node(backup_dir, 'master', master)
+        # GET LOGICAL CONTENT FROM MASTER
+        before = master.safe_psql("postgres", "SELECT * FROM t_heap")
+        # GET PHYSICAL CONTENT FROM MASTER
+        pgdata_master = self.pgdata_content(master.data_dir)
+
+        # Settings for Replica
+        self.restore_node(backup_dir, 'master', replica)
+        # CHECK PHYSICAL CORRECTNESS on REPLICA
+        pgdata_replica = self.pgdata_content(replica.data_dir)
+        self.compare_pgdata(pgdata_master, pgdata_replica)
+
+        self.set_replica(master, replica, synchronous=True)
+        # ADD INSTANCE REPLICA
+        self.add_instance(backup_dir, 'replica', replica)
+        # SET ARCHIVING FOR REPLICA
+        self.set_archiving(backup_dir, 'replica', replica, replica=True)
+        replica.start()
+
+        # CHECK LOGICAL CORRECTNESS on REPLICA
+        after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
+        self.assertEqual(before, after)
+
+        # TAKE FULL ARCHIVE BACKUP FROM REPLICA
+        backup_id = self.backup_node(backup_dir, 'replica', replica, options=['--archive-timeout=30',
+            '--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
+        self.validate_pb(backup_dir, 'replica')
+        self.assertEqual('OK', self.show_pb(backup_dir, 'replica', backup_id)['status'])
+
+        # TAKE FULL ARCHIVE BACKUP FROM MASTER
+        backup_id = self.backup_node(backup_dir, 'master', master)
+        self.validate_pb(backup_dir, 'master')
+        self.assertEqual('OK', self.show_pb(backup_dir, 'master', backup_id)['status'])