@@ -36,6 +36,7 @@ def __init__(self, config, source_dir, key_prefix):
3636 self .chunk_size_mb = self .config .upload .s3 .chunk_size_mb
3737 self .chunk_size = self .chunk_size_mb * 1024 * 1024
3838
39+ self .completed = False
3940 self .timer_name = self .__class__ .__name__
4041 self ._pool = None
4142 self ._multipart = None
@@ -51,74 +52,75 @@ def __init__(self, config, source_dir, key_prefix):
def run(self):
    """Upload every file in self.source_dir to AWS S3 using multipart uploads.

    For each file in the source directory a multipart upload is started and
    the file is split into self.chunk_size byte parts, each uploaded by an
    S3UploadThread dispatched onto a multiprocessing Pool of
    self.thread_count workers. On success the key ACL is set to self.s3_acl
    and, if self.remove_uploaded is set, the local file (and finally the
    whole source directory) is removed. Sets self.completed = True only when
    every file uploaded successfully.

    Raises:
        OperationError: when not all parts of a file were uploaded, or when
            any other exception occurs during upload (the in-flight
            multipart upload is cancelled first).
    """
    if not os.path.isdir(self.source_dir):
        logging.error("The source directory: %s does not exist or is not a directory! Skipping AWS S3 Upload!" % self.source_dir)
        return
    try:
        self.timer.start(self.timer_name)
        for file_name in os.listdir(self.source_dir):
            # Avoid a double slash when the bucket prefix is the root "/".
            if self.bucket_prefix == "/":
                key_name = "/%s/%s" % (self.key_prefix, file_name)
            else:
                key_name = "%s/%s/%s" % (self.bucket_prefix, self.key_prefix, file_name)

            file_path = os.path.join(self.source_dir, file_name)
            file_size = os.stat(file_path).st_size
            # float() division so a trailing partial chunk still counts
            # (Python 2 int division would truncate it away).
            chunk_count = int(ceil(file_size / float(self.chunk_size)))

            logging.info("Starting multipart AWS S3 upload to key: %s%s using %i threads, %imb chunks, %i retries" % (
                self.bucket_name,
                key_name,
                self.thread_count,
                self.chunk_size_mb,
                self.retries
            ))
            self._multipart = self.bucket.initiate_multipart_upload(key_name)
            # NOTE(review): a fresh Pool is created per file and joined below;
            # the exception path relies on close() for pool cleanup — confirm.
            self._pool = Pool(processes=self.thread_count)

            for i in range(chunk_count):
                offset = self.chunk_size * i
                # Last part may be shorter than a full chunk.
                byte_count = min(self.chunk_size, file_size - offset)
                # S3 multipart part numbers are 1-based.
                part_num = i + 1
                self._pool.apply_async(S3UploadThread(
                    self.bucket_name,
                    self.access_key,
                    self.secret_key,
                    self.s3_host,
                    self._multipart.id,
                    part_num,
                    file_path,
                    offset,
                    byte_count,
                    self.retries,
                    self.secure
                ).run)
            self._pool.close()
            self._pool.join()

            # All workers are done; verify every part actually arrived
            # before completing the multipart upload.
            if len(self._multipart.get_all_parts()) == chunk_count:
                self._multipart.complete_upload()
                key = self.bucket.get_key(key_name)
                key.set_acl(self.s3_acl)
                self._upload_done = True

                if self.remove_uploaded:
                    logging.info("Uploaded AWS S3 key: %s%s successfully. Removing local file" % (self.bucket_name, key_name))
                    os.remove(os.path.join(self.source_dir, file_name))
                else:
                    logging.info("Uploaded AWS S3 key: %s%s successfully" % (self.bucket_name, key_name))
            else:
                self._multipart.cancel_upload()
                logging.error("Failed to upload all multiparts for key: %s%s! Upload cancelled" % (self.bucket_name, key_name))
                raise OperationError("Failed to upload all multiparts for key: %s%s! Upload cancelled" % (self.bucket_name, key_name))

        if self.remove_uploaded:
            # rmdir only succeeds on an empty dir — every file was removed
            # above when remove_uploaded is set.
            logging.info("Removing backup source dir after successful AWS S3 upload of all backups")
            os.rmdir(self.source_dir)
        self.timer.stop(self.timer_name)
    # Fixed: `except Exception, e` (comma form) is removed in Python 3 and
    # deprecated since 2.6; `as` is valid in both.
    except Exception as e:
        logging.error("Uploading to AWS S3 failed! Error: %s" % e)
        if self._multipart:
            # Best-effort cancel of the in-flight upload before re-raising.
            # NOTE(review): after the last file completes, _multipart still
            # points at a finished upload — cancel on it may error; confirm.
            self._multipart.cancel_upload()
        raise OperationError(e)
    self.completed = True
122124
123125 def close (self ):
124126 if self ._pool :
0 commit comments