Skip to content

Commit 466a2e2

Browse files
committed
dma-mapping: Provide an interface to allow allocate IOVA
JIRA: https://issues.redhat.com/browse/RHEL-113839
Upstream-Status: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Conflicts: iommu_dma_free_iova still takes cookie in RHEL9.

commit 393cf70
Author: Leon Romanovsky <leonro@nvidia.com>
Date:   Mon May 5 10:01:42 2025 +0300

    dma-mapping: Provide an interface to allow allocate IOVA

    Extend DMA-API to provide an interface to allocate IOVA space. Such
    interface is needed to give to the callers ability to allocate IOVA
    space with specific alignment. After allocation of such space, callers
    will be able to reuse that IOVA space for multiple mappings.

    Reviewed-by: Christoph Hellwig <hch@lst.de>
    Tested-by: Jens Axboe <axboe@kernel.dk>
    Reviewed-by: Luis Chamberlain <mcgrof@kernel.org>
    Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
    Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>

(cherry picked from commit 393cf70)
Co-developed-by: Claude claude-sonnet-4
Signed-off-by: Jerry Snitselaar <jsnitsel@redhat.com>
1 parent f44cb9d commit 466a2e2

File tree

2 files changed

+134
-0
lines changed

2 files changed

+134
-0
lines changed

drivers/iommu/dma-iommu.c

Lines changed: 86 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1746,6 +1746,92 @@ size_t iommu_dma_max_mapping_size(struct device *dev)
17461746
return SIZE_MAX;
17471747
}
17481748

1749+
/**
1750+
* dma_iova_try_alloc - Try to allocate an IOVA space
1751+
* @dev: Device to allocate the IOVA space for
1752+
* @state: IOVA state
1753+
* @phys: physical address
1754+
* @size: IOVA size
1755+
*
1756+
* Check if @dev supports the IOVA-based DMA API, and if yes allocate IOVA space
1757+
* for the given base address and size.
1758+
*
1759+
* Note: @phys is only used to calculate the IOVA alignment. Callers that always
1760+
* do PAGE_SIZE aligned transfers can safely pass 0 here.
1761+
*
1762+
* Returns %true if the IOVA-based DMA API can be used and IOVA space has been
1763+
* allocated, or %false if the regular DMA API should be used.
1764+
*/
1765+
bool dma_iova_try_alloc(struct device *dev, struct dma_iova_state *state,
1766+
phys_addr_t phys, size_t size)
1767+
{
1768+
struct iommu_dma_cookie *cookie;
1769+
struct iommu_domain *domain;
1770+
struct iova_domain *iovad;
1771+
size_t iova_off;
1772+
dma_addr_t addr;
1773+
1774+
memset(state, 0, sizeof(*state));
1775+
if (!use_dma_iommu(dev))
1776+
return false;
1777+
1778+
domain = iommu_get_dma_domain(dev);
1779+
cookie = domain->iova_cookie;
1780+
iovad = &cookie->iovad;
1781+
iova_off = iova_offset(iovad, phys);
1782+
1783+
if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
1784+
iommu_deferred_attach(dev, iommu_get_domain_for_dev(dev)))
1785+
return false;
1786+
1787+
if (WARN_ON_ONCE(!size))
1788+
return false;
1789+
1790+
/*
1791+
* DMA_IOVA_USE_SWIOTLB is flag which is set by dma-iommu
1792+
* internals, make sure that caller didn't set it and/or
1793+
* didn't use this interface to map SIZE_MAX.
1794+
*/
1795+
if (WARN_ON_ONCE((u64)size & DMA_IOVA_USE_SWIOTLB))
1796+
return false;
1797+
1798+
addr = iommu_dma_alloc_iova(domain,
1799+
iova_align(iovad, size + iova_off),
1800+
dma_get_mask(dev), dev);
1801+
if (!addr)
1802+
return false;
1803+
1804+
state->addr = addr + iova_off;
1805+
state->__size = size;
1806+
return true;
1807+
}
1808+
EXPORT_SYMBOL_GPL(dma_iova_try_alloc);
1809+
1810+
/**
1811+
* dma_iova_free - Free an IOVA space
1812+
* @dev: Device to free the IOVA space for
1813+
* @state: IOVA state
1814+
*
1815+
* Undoes a successful dma_try_iova_alloc().
1816+
*
1817+
* Note that all dma_iova_link() calls need to be undone first. For callers
1818+
* that never call dma_iova_unlink(), dma_iova_destroy() can be used instead
1819+
* which unlinks all ranges and frees the IOVA space in a single efficient
1820+
* operation.
1821+
*/
1822+
void dma_iova_free(struct device *dev, struct dma_iova_state *state)
1823+
{
1824+
struct iommu_domain *domain = iommu_get_dma_domain(dev);
1825+
struct iommu_dma_cookie *cookie = domain->iova_cookie;
1826+
struct iova_domain *iovad = &cookie->iovad;
1827+
size_t iova_start_pad = iova_offset(iovad, state->addr);
1828+
size_t size = dma_iova_size(state);
1829+
1830+
iommu_dma_free_iova(cookie, state->addr - iova_start_pad,
1831+
iova_align(iovad, size + iova_start_pad), NULL);
1832+
}
1833+
EXPORT_SYMBOL_GPL(dma_iova_free);
1834+
17491835
void iommu_setup_dma_ops(struct device *dev)
17501836
{
17511837
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

include/linux/dma-mapping.h

Lines changed: 48 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -72,6 +72,22 @@
7272

7373
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
7474

75+
struct dma_iova_state {
76+
dma_addr_t addr;
77+
u64 __size;
78+
};
79+
80+
/*
81+
* Use the high bit to mark if we used swiotlb for one or more ranges.
82+
*/
83+
#define DMA_IOVA_USE_SWIOTLB (1ULL << 63)
84+
85+
static inline size_t dma_iova_size(struct dma_iova_state *state)
86+
{
87+
/* Casting is needed for 32-bits systems */
88+
return (size_t)(state->__size & ~DMA_IOVA_USE_SWIOTLB);
89+
}
90+
7591
#ifdef CONFIG_DMA_API_DEBUG
7692
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
7793
void debug_dma_map_single(struct device *dev, const void *addr,
@@ -277,6 +293,38 @@ static inline int dma_mmap_noncontiguous(struct device *dev,
277293
}
278294
#endif /* CONFIG_HAS_DMA */
279295

296+
#ifdef CONFIG_IOMMU_DMA
297+
/**
298+
* dma_use_iova - check if the IOVA API is used for this state
299+
* @state: IOVA state
300+
*
301+
* Return %true if the DMA transfers uses the dma_iova_*() calls or %false if
302+
* they can't be used.
303+
*/
304+
static inline bool dma_use_iova(struct dma_iova_state *state)
305+
{
306+
return state->__size != 0;
307+
}
308+
309+
bool dma_iova_try_alloc(struct device *dev, struct dma_iova_state *state,
310+
phys_addr_t phys, size_t size);
311+
void dma_iova_free(struct device *dev, struct dma_iova_state *state);
312+
#else /* CONFIG_IOMMU_DMA */
313+
static inline bool dma_use_iova(struct dma_iova_state *state)
314+
{
315+
return false;
316+
}
317+
static inline bool dma_iova_try_alloc(struct device *dev,
318+
struct dma_iova_state *state, phys_addr_t phys, size_t size)
319+
{
320+
return false;
321+
}
322+
static inline void dma_iova_free(struct device *dev,
323+
struct dma_iova_state *state)
324+
{
325+
}
326+
#endif /* CONFIG_IOMMU_DMA */
327+
280328
#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
281329
void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
282330
enum dma_data_direction dir);

0 commit comments

Comments
 (0)