@@ -511,7 +511,12 @@ unsigned int HapDecode(const void *inputBuffer, unsigned long inputBufferBytes,
511511 /*
512512 The Chunk Second-Stage Compressor Table and Chunk Size Table are required
513513 */
514- if (compressors && chunk_sizes && chunk_count )
514+ if (compressors == NULL || chunk_sizes == NULL )
515+ {
516+ return HapResult_Bad_Frame ;
517+ }
518+
519+ if (chunk_count > 0 )
515520 {
516521 /*
517522 Step through the chunks, storing information for their decompression
@@ -578,22 +583,23 @@ unsigned int HapDecode(const void *inputBuffer, unsigned long inputBufferBytes,
578583 result = HapResult_Buffer_Too_Small ;
579584 }
580585
581- if (result != HapResult_No_Error )
586+ if (result == HapResult_No_Error )
582587 {
583- free (chunk_info );
584- return result ;
585- }
586-
587- bytesUsed = running_uncompressed_chunk_size ;
588+ bytesUsed = running_uncompressed_chunk_size ;
588589
589- callback ((HapDecodeWorkFunction )hap_decode_chunk , chunk_info , chunk_count , info );
590+ callback ((HapDecodeWorkFunction )hap_decode_chunk , chunk_info , chunk_count , info );
590591
591- /*
592- Check to see if we encountered an error
593- */
594- for (i = 0 ; i < chunk_count ; i ++ )
595- {
596- if (chunk_info [i ].result != HapResult_No_Error ) result = chunk_info [i ].result ;
592+ /*
593+ Check to see if we encountered any errors and report one of them
594+ */
595+ for (i = 0 ; i < chunk_count ; i ++ )
596+ {
597+ if (chunk_info [i ].result != HapResult_No_Error )
598+ {
599+ result = chunk_info [i ].result ;
600+ break ;
601+ }
602+ }
597603 }
598604
599605 free (chunk_info );
0 commit comments