@@ -3,7 +3,9 @@
  * Copyright (C) 2012 ARM Ltd.
  * Copyright (C) 2020 Google LLC
  */
+#include <linux/cma.h>
 #include <linux/debugfs.h>
+#include <linux/dma-contiguous.h>
 #include <linux/dma-direct.h>
 #include <linux/dma-noncoherent.h>
 #include <linux/init.h>
@@ -55,6 +57,29 @@ static void dma_atomic_pool_size_add(gfp_t gfp, size_t size)
 	pool_size_kernel += size;
 }
 
+static bool cma_in_zone(gfp_t gfp)
+{
+	unsigned long size;
+	phys_addr_t end;
+	struct cma *cma;
+
+	cma = dev_get_cma_area(NULL);
+	if (!cma)
+		return false;
+
+	size = cma_get_size(cma);
+	if (!size)
+		return false;
+
+	/* CMA can't cross zone boundaries, see cma_activate_area() */
+	end = cma_get_base(cma) + size - 1;
+	if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
+		return end <= DMA_BIT_MASK(zone_dma_bits);
+	if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
+		return end <= DMA_BIT_MASK(32);
+	return true;
+}
+
 static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
 			      gfp_t gfp)
 {
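The new cma_in_zone() helper is what keeps a GFP_DMA or GFP_DMA32 pool refill from being backed by a CMA area that lies above the requested zone: since cma_activate_area() guarantees a CMA area never crosses a zone boundary, comparing the area's last byte against the zone limit is sufficient. A minimal stand-alone sketch of that bounds check follows; the base, size, and zone_dma_bits values are made up for illustration (the kernel reads them from the device's CMA area), and only DMA_BIT_MASK mirrors the real kernel macro.

#include <stdint.h>
#include <stdio.h>

/* Same definition as the kernel's DMA_BIT_MASK() macro. */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	uint64_t cma_base = 0x80000000ULL;	/* hypothetical CMA base */
	uint64_t cma_size = 64ULL << 20;	/* hypothetical 64 MiB area */
	unsigned int zone_dma_bits = 30;	/* e.g. a 30-bit ZONE_DMA */

	/* A CMA area never crosses a zone boundary, so the last byte decides. */
	uint64_t end = cma_base + cma_size - 1;

	printf("fits ZONE_DMA:   %s\n",
	       end <= DMA_BIT_MASK(zone_dma_bits) ? "yes" : "no");
	printf("fits ZONE_DMA32: %s\n",
	       end <= DMA_BIT_MASK(32) ? "yes" : "no");
	return 0;
}

With these numbers the area ends at 0x83ffffff, so it fails the 30-bit ZONE_DMA check but fits ZONE_DMA32, and a GFP_DMA refill would skip CMA entirely.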
@@ -68,7 +93,11 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
 
 	do {
 		pool_size = 1 << (PAGE_SHIFT + order);
-		page = alloc_pages(gfp, order);
+		if (cma_in_zone(gfp))
+			page = dma_alloc_from_contiguous(NULL, 1 << order,
+							 order, false);
+		if (!page)
+			page = alloc_pages(gfp, order);
 	} while (!page && order-- > 0);
 	if (!page)
 		goto out;
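The refill loop in atomic_pool_expand() now prefers CMA whenever cma_in_zone() approves, falls back to alloc_pages(), and shrinks the allocation order after each failure until order 0 fails too. A stand-alone sketch of that fallback shape, with both alloc_* helpers below being hypothetical stubs rather than kernel APIs:

#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT 12

static void *alloc_contiguous_stub(unsigned int order)
{
	(void)order;
	return NULL;		/* pretend the CMA-like source is exhausted */
}

static void *alloc_pages_stub(unsigned int order)
{
	static char fake_page;
	return order <= 2 ? &fake_page : NULL;	/* only small orders succeed */
}

int main(void)
{
	unsigned int order = 5;	/* start big, like the pool does */
	size_t pool_size;
	void *page = NULL;

	do {
		pool_size = (size_t)1 << (PAGE_SHIFT + order);
		page = alloc_contiguous_stub(order);	/* preferred source */
		if (!page)
			page = alloc_pages_stub(order);	/* fallback */
	} while (!page && order-- > 0);

	if (page)
		printf("expanded pool by %zu bytes (order %u)\n", pool_size, order);
	return 0;
}

Note the `if (!page)` guard added by the patch: without it, a successful CMA allocation would immediately be overwritten by the alloc_pages() fallback.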
@@ -196,93 +225,75 @@ static int __init dma_atomic_pool_init(void)
 }
 postcore_initcall(dma_atomic_pool_init);
 
-static inline struct gen_pool *dma_guess_pool_from_device(struct device *dev)
+static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
 {
-	u64 phys_mask;
-	gfp_t gfp;
-
-	gfp = dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
-					  &phys_mask);
-	if (IS_ENABLED(CONFIG_ZONE_DMA) && gfp == GFP_DMA)
+	if (prev == NULL) {
+		if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
+			return atomic_pool_dma32;
+		if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
+			return atomic_pool_dma;
+		return atomic_pool_kernel;
+	}
+	if (prev == atomic_pool_kernel)
+		return atomic_pool_dma32 ? atomic_pool_dma32 : atomic_pool_dma;
+	if (prev == atomic_pool_dma32)
 		return atomic_pool_dma;
-	if (IS_ENABLED(CONFIG_ZONE_DMA32) && gfp == GFP_DMA32)
-		return atomic_pool_dma32;
-	return atomic_pool_kernel;
+	return NULL;
 }
 
-static inline struct gen_pool *dma_get_safer_pool(struct gen_pool *bad_pool)
+static struct page *__dma_alloc_from_pool(struct device *dev, size_t size,
+		struct gen_pool *pool, void **cpu_addr,
+		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
 {
-	if (bad_pool == atomic_pool_kernel)
-		return atomic_pool_dma32 ?: atomic_pool_dma;
+	unsigned long addr;
+	phys_addr_t phys;
 
-	if (bad_pool == atomic_pool_dma32)
-		return atomic_pool_dma;
+	addr = gen_pool_alloc(pool, size);
+	if (!addr)
+		return NULL;
 
-	return NULL;
-}
+	phys = gen_pool_virt_to_phys(pool, addr);
+	if (phys_addr_ok && !phys_addr_ok(dev, phys, size)) {
+		gen_pool_free(pool, addr, size);
+		return NULL;
+	}
 
-static inline struct gen_pool *dma_guess_pool(struct device *dev,
-		struct gen_pool *bad_pool)
-{
-	if (bad_pool)
-		return dma_get_safer_pool(bad_pool);
+	if (gen_pool_avail(pool) < atomic_pool_size)
+		schedule_work(&atomic_pool_work);
 
-	return dma_guess_pool_from_device(dev);
+	*cpu_addr = (void *)addr;
+	memset(*cpu_addr, 0, size);
+	return pfn_to_page(__phys_to_pfn(phys));
 }
 
-void *dma_alloc_from_pool(struct device *dev, size_t size,
-		struct page **ret_page, gfp_t flags)
+struct page *dma_alloc_from_pool(struct device *dev, size_t size,
+		void **cpu_addr, gfp_t gfp,
+		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
 {
 	struct gen_pool *pool = NULL;
-	unsigned long val = 0;
-	void *ptr = NULL;
-	phys_addr_t phys;
-
-	while (1) {
-		pool = dma_guess_pool(dev, pool);
-		if (!pool) {
-			WARN(1, "Failed to get suitable pool for %s\n",
-			     dev_name(dev));
-			break;
-		}
-
-		val = gen_pool_alloc(pool, size);
-		if (!val)
-			continue;
-
-		phys = gen_pool_virt_to_phys(pool, val);
-		if (dma_coherent_ok(dev, phys, size))
-			break;
-
-		gen_pool_free(pool, val, size);
-		val = 0;
-	}
-
-
-	if (val) {
-		*ret_page = pfn_to_page(__phys_to_pfn(phys));
-		ptr = (void *)val;
-		memset(ptr, 0, size);
+	struct page *page;
 
-		if (gen_pool_avail(pool) < atomic_pool_size)
-			schedule_work(&atomic_pool_work);
+	while ((pool = dma_guess_pool(pool, gfp))) {
+		page = __dma_alloc_from_pool(dev, size, pool, cpu_addr,
+					     phys_addr_ok);
+		if (page)
+			return page;
 	}
 
-	return ptr;
+	WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev));
+	return NULL;
 }
 
 bool dma_free_from_pool(struct device *dev, void *start, size_t size)
 {
 	struct gen_pool *pool = NULL;
 
-	while (1) {
-		pool = dma_guess_pool(dev, pool);
-		if (!pool)
-			return false;
-
-		if (gen_pool_has_addr(pool, (unsigned long)start, size)) {
-			gen_pool_free(pool, (unsigned long)start, size);
-			return true;
-		}
+	while ((pool = dma_guess_pool(pool, 0))) {
+		if (!gen_pool_has_addr(pool, (unsigned long)start, size))
+			continue;
+		gen_pool_free(pool, (unsigned long)start, size);
+		return true;
 	}
+
+	return false;
 }
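This hunk replaces the dma_guess_pool_from_device()/dma_get_safer_pool() pair with a single cursor-style iterator: dma_guess_pool(NULL, gfp) yields the first candidate pool for the gfp mask, passing the previous answer yields the next-safer pool, and NULL ends the walk. That is why dma_alloc_from_pool() and dma_free_from_pool() can both drive it from the same `while ((pool = dma_guess_pool(pool, ...)))` loop. A toy model of the pattern, with pool names and the want_dma32 flag invented for illustration:

#include <stdio.h>

struct pool { const char *name; };

static struct pool kernel_pool = { "kernel" };
static struct pool dma32_pool  = { "dma32" };
static struct pool dma_pool    = { "dma" };

static struct pool *guess_pool(struct pool *prev, int want_dma32)
{
	if (!prev)
		return want_dma32 ? &dma32_pool : &kernel_pool;
	if (prev == &kernel_pool)
		return &dma32_pool;
	if (prev == &dma32_pool)
		return &dma_pool;
	return NULL;		/* ran out of safer pools */
}

int main(void)
{
	struct pool *pool = NULL;

	/* Walk every pool, the way dma_free_from_pool() passes gfp = 0. */
	while ((pool = guess_pool(pool, 0)))
		printf("trying pool: %s\n", pool->name);
	return 0;
}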
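The other change of signature is that suitability is now the caller's decision, made through the phys_addr_ok callback rather than a hard-coded dma_coherent_ok() call inside the pool code: a dma-direct caller can pass its coherency check, while a caller that remaps the memory anyway (as an IOMMU path would) can pass NULL and accept any pool address. A simplified model of that contract, with the types and the fits_32bit helper below being stand-ins rather than kernel code:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

/* Toy predicate: accept only allocations entirely below 4 GiB. */
static bool fits_32bit(phys_addr_t phys, size_t size)
{
	return phys + size - 1 <= 0xffffffffULL;
}

/* Mirrors the pool's check: a NULL predicate accepts anything. */
static bool try_alloc(phys_addr_t phys, size_t size,
		      bool (*phys_addr_ok)(phys_addr_t, size_t))
{
	if (phys_addr_ok && !phys_addr_ok(phys, size))
		return false;	/* caller rejected it; free and try the next pool */
	return true;
}

int main(void)
{
	/* An allocation at 4 GiB fails the 32-bit check but passes with NULL. */
	printf("%d\n", try_alloc(0x100000000ULL, 4096, fits_32bit));	/* 0 */
	printf("%d\n", try_alloc(0x100000000ULL, 4096, NULL));		/* 1 */
	return 0;
}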