@@ -199,6 +199,22 @@ func createStreamingMultipartRequestBody(files map[string][]string, formDataFiel
199199}
200200
201201// addFilePart adds a base64 encoded file part to the multipart writer with the provided field name and file path.
202+ // This function opens the specified file, sets the appropriate content type and headers, and adds it to the multipart writer.
203+ //
204+ // Parameters:
205+ // - writer: The multipart writer used to construct the multipart request body.
206+ // - fieldName: The field name for the file part.
207+ // - filePath: The path to the file to be included in the request.
208+ // - fileContentTypes: A map specifying the content type for each file part. The key is the field name and the value is the
209+ // content type (e.g., "image/jpeg").
210+ // - formDataPartHeaders: A map specifying custom headers for each part of the multipart form data. The key is the field name
211+ // and the value is an http.Header containing the headers for that part.
212+ // - log: An instance of a logger implementing the logger.Logger interface, used to log informational messages, warnings,
213+ // and errors encountered during the addition of the file part.
214+ //
215+ // Returns:
216+ // - error: An error object indicating failure during the addition of the file part. This could be due to issues such as
217+ // file reading errors or multipart writer errors.
202218func addFilePart (writer * multipart.Writer , fieldName , filePath string , fileContentTypes map [string ]string , formDataPartHeaders map [string ]http.Header , log logger.Logger ) error {
203219 file , err := os .Open (filePath )
204220 if err != nil {
@@ -294,24 +310,27 @@ func setFormDataPartHeader(fieldname, filename, contentType string, customHeader
294310
295311// chunkFileUpload reads the file upload into chunks and writes it to the writer.
296312// This function reads the file in chunks and writes it to the provided writer, allowing for progress logging during the upload.
297- // chunk size is set to 8192 KB (8 MB) by default. This is a common chunk size used for file uploads to cloud storage services.
313+ // The chunk size is set to 8192 KB (8 MB) by default. This is a common chunk size used for file uploads to cloud storage services.
298314
299315// Azure Blob Storage has a minimum chunk size of 4 MB and a maximum of 100 MB for block blobs.
300316// GCP Cloud Storage has a minimum chunk size of 256 KB and a maximum of 5 GB.
301317// AWS S3 has a minimum chunk size of 5 MB and a maximum of 5 GB.
302318
319+ // The function also calculates the total number of chunks and logs the chunk number during the upload process.
320+ //
303321// Parameters:
304322// - file: The file to be uploaded.
305323// - writer: The writer to which the file content will be written.
306324// - log: An instance of a logger implementing the logger.Logger interface, used to log informational messages, warnings,
307325// and errors encountered during the file upload.
308326// - updateProgress: A function to update the upload progress, typically used for logging purposes.
327+ // - uploadState: A pointer to an UploadState struct used to track the progress of the file upload for resumable uploads.
309328
310329// Returns:
311330// - error: An error object indicating failure during the file upload. This could be due to issues such as file reading errors
312331// or writer errors.
313332func chunkFileUpload (file * os.File , writer io.Writer , log logger.Logger , updateProgress func (int64 ), uploadState * UploadState ) error {
314- const chunkSize = 8 * 1024 * 1024 // 8 * 1024 * 1024 bytes (8 MB)
333+ const chunkSize = 8 * 1024 * 1024 // 8 MB
315334 buffer := make ([]byte , chunkSize )
316335 totalWritten := int64 (0 )
317336 chunkWritten := int64 (0 )
@@ -320,6 +339,14 @@ func chunkFileUpload(file *os.File, writer io.Writer, log logger.Logger, updateP
320339 // Seek to the last uploaded byte
321340 file .Seek (uploadState .LastUploadedByte , io .SeekStart )
322341
342+ // Calculate the total number of chunks
343+ fileInfo , err := file .Stat ()
344+ if err != nil {
345+ return fmt .Errorf ("failed to get file info: %v" , err )
346+ }
347+ totalChunks := (fileInfo .Size () + chunkSize - 1 ) / chunkSize
348+ currentChunk := uploadState .LastUploadedByte / chunkSize
349+
323350 for {
324351 n , err := file .Read (buffer )
325352 if err != nil && err != io .EOF {
@@ -343,8 +370,11 @@ func chunkFileUpload(file *os.File, writer io.Writer, log logger.Logger, updateP
343370 updateProgress (int64 (written ))
344371
345372 if chunkWritten >= chunkSize {
373+ currentChunk ++
346374 log .Debug ("File Upload Chunk Sent" ,
347375 zap .String ("file_name" , fileName ),
376+ zap .Int64 ("chunk_number" , currentChunk ),
377+ zap .Int64 ("total_chunks" , totalChunks ),
348378 zap .Int64 ("kb_sent" , chunkWritten / 1024 ),
349379 zap .Int64 ("total_kb_sent" , totalWritten / 1024 ))
350380 chunkWritten = 0
@@ -353,8 +383,11 @@ func chunkFileUpload(file *os.File, writer io.Writer, log logger.Logger, updateP
353383
354384 // Log any remaining bytes that were written but didn't reach the log threshold
355385 if chunkWritten > 0 {
386+ currentChunk ++
356387 log .Debug ("Final Upload Chunk Sent" ,
357388 zap .String ("file_name" , fileName ),
389+ zap .Int64 ("chunk_number" , currentChunk ),
390+ zap .Int64 ("total_chunks" , totalChunks ),
358391 zap .Int64 ("kb_sent" , chunkWritten / 1024 ),
359392 zap .Int64 ("total_kb_sent" , totalWritten / 1024 ))
360393 }