@@ -223,6 +223,7 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
 		debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
 	} else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
 				ents != -EIO && ents != -EREMOTEIO)) {
+		trace_dma_map_sg_err(dev, sg, nents, ents, dir, attrs);
 		return -EIO;
 	}
 
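__dma_map_sg_attrs() backs dma_map_sg() and dma_map_sgtable(), so the tracepoint added above fires when a mapping fails with an unexpected error code (alongside the existing WARN), just before the error is squashed to -EIO. A minimal sketch of a caller that would reach this path; example_dev_map() is hypothetical, only dma_map_sgtable()/dma_unmap_sgtable() are real API:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical caller; the failure branch above is now traced. */
static int example_dev_map(struct device *dev, struct sg_table *sgt)
{
	int ret;

	ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	if (ret)
		return ret;	/* error already recorded by dma_map_sg_err */

	/* ... hand sgt->sgl to the hardware, then unmap ... */
	dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	return 0;
}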
@@ -604,20 +605,26 @@ void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	if (WARN_ON_ONCE(flag & __GFP_COMP))
 		return NULL;
 
-	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
+	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr)) {
+		trace_dma_alloc(dev, cpu_addr, *dma_handle, size,
+				DMA_BIDIRECTIONAL, flag, attrs);
 		return cpu_addr;
+	}
 
 	/* let the implementation decide on the zone to allocate from: */
 	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
 
-	if (dma_alloc_direct(dev, ops))
+	if (dma_alloc_direct(dev, ops)) {
 		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
-	else if (use_dma_iommu(dev))
+	} else if (use_dma_iommu(dev)) {
 		cpu_addr = iommu_dma_alloc(dev, size, dma_handle, flag, attrs);
-	else if (ops->alloc)
+	} else if (ops->alloc) {
 		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
-	else
+	} else {
+		trace_dma_alloc(dev, NULL, 0, size, DMA_BIDIRECTIONAL, flag,
+				attrs);
 		return NULL;
+	}
 
 	trace_dma_alloc(dev, cpu_addr, *dma_handle, size, DMA_BIDIRECTIONAL,
 			flag, attrs);
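With this hunk, dma_alloc_attrs() traces every outcome: the dev-coherent fast path, the three allocator backends, and a failed allocation (the NULL return). Since dma_alloc_coherent() is a thin wrapper around dma_alloc_attrs(), an ordinary driver allocation like the hedged sketch below is covered; example_alloc() is made up for illustration:

#include <linux/dma-mapping.h>

static void *example_alloc(struct device *dev, size_t buf_size,
			   dma_addr_t *handle)
{
	/*
	 * Both a successful allocation and a NULL return are now
	 * visible through the dma_alloc tracepoint.
	 */
	return dma_alloc_coherent(dev, buf_size, handle, GFP_KERNEL);
}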
@@ -642,11 +649,11 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	 */
 	WARN_ON(irqs_disabled());
 
+	trace_dma_free(dev, cpu_addr, dma_handle, size, DMA_BIDIRECTIONAL,
+		       attrs);
 	if (!cpu_addr)
 		return;
 
-	trace_dma_free(dev, cpu_addr, dma_handle, size, DMA_BIDIRECTIONAL,
-		       attrs);
 	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
 	if (dma_alloc_direct(dev, ops))
 		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
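Hoisting trace_dma_free() above the NULL check means a free of a NULL buffer is traced before the early return, keeping frees paired with allocations in the trace. A hedged sketch of such a caller; example_teardown() is hypothetical, dma_free_coherent() is the real wrapper around dma_free_attrs():

#include <linux/dma-mapping.h>

static void example_teardown(struct device *dev, size_t buf_size,
			     void *cpu_addr, dma_addr_t handle)
{
	/* Traced even if cpu_addr is NULL, thanks to the hunk above. */
	dma_free_coherent(dev, buf_size, cpu_addr, handle);
}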
@@ -688,6 +695,8 @@ struct page *dma_alloc_pages(struct device *dev, size_t size,
 		trace_dma_alloc_pages(dev, page_to_virt(page), *dma_handle,
 				      size, dir, gfp, 0);
 		debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
+	} else {
+		trace_dma_alloc_pages(dev, NULL, 0, size, dir, gfp, 0);
 	}
 	return page;
 }
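dma_alloc_pages() likewise now records failures: the new else branch emits dma_alloc_pages with a NULL virtual address and a zero handle. A hedged sketch of a caller (example_alloc_pages() is made up):

#include <linux/dma-mapping.h>

static struct page *example_alloc_pages(struct device *dev, size_t size,
					dma_addr_t *handle)
{
	/* A NULL return is now traced instead of vanishing silently. */
	return dma_alloc_pages(dev, size, handle, DMA_BIDIRECTIONAL,
			       GFP_KERNEL);
}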
@@ -772,6 +781,8 @@ struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
 		sgt->nents = 1;
 		trace_dma_alloc_sgt(dev, sgt, size, dir, gfp, attrs);
 		debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
+	} else {
+		trace_dma_alloc_sgt_err(dev, NULL, 0, size, gfp, dir, attrs);
 	}
 	return sgt;
 }
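Finally, dma_alloc_noncontiguous() gains a failure tracepoint, dma_alloc_sgt_err, mirroring the success-path dma_alloc_sgt. A hedged sketch of a caller (example_alloc_sgt() is hypothetical):

#include <linux/dma-mapping.h>

static struct sg_table *example_alloc_sgt(struct device *dev, size_t size)
{
	/* A NULL return (allocation failure) now emits dma_alloc_sgt_err. */
	return dma_alloc_noncontiguous(dev, size, DMA_FROM_DEVICE,
				       GFP_KERNEL, 0);
}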