Commit bb196e3
iommu/dma: Factor out a iommu_dma_map_swiotlb helper
JIRA: https://issues.redhat.com/browse/RHEL-113839
Upstream-Status: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

commit ed18a46
Author: Christoph Hellwig <hch@lst.de>
Date:   Mon May 5 10:01:43 2025 +0300

    iommu/dma: Factor out a iommu_dma_map_swiotlb helper

    Factor out a helper that maps swiotlb buffered I/O from
    iommu_dma_map_page. This helper will be reused for the upcoming
    iova range allocation interface.

    Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
    Tested-by: Jens Axboe <axboe@kernel.dk>
    Reviewed-by: Luis Chamberlain <mcgrof@kernel.org>
    Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
    Signed-off-by: Christoph Hellwig <hch@lst.de>
    Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
    Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>

(cherry picked from commit ed18a46)
Co-developed-by: Claude claude-sonnet-4
Signed-off-by: Jerry Snitselaar <jsnitsel@redhat.com>
1 parent 466a2e2 commit bb196e3

File tree: 1 file changed

drivers/iommu/dma-iommu.c

Lines changed: 41 additions & 32 deletions
@@ -1161,6 +1161,43 @@ void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
                 arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
 }
 
+static phys_addr_t iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
+                size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+        struct iommu_domain *domain = iommu_get_dma_domain(dev);
+        struct iova_domain *iovad = &domain->iova_cookie->iovad;
+
+        if (!is_swiotlb_active(dev)) {
+                dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
+                return (phys_addr_t)DMA_MAPPING_ERROR;
+        }
+
+        trace_swiotlb_bounced(dev, phys, size);
+
+        phys = swiotlb_tbl_map_single(dev, phys, size, iova_mask(iovad), dir,
+                        attrs);
+
+        /*
+         * Untrusted devices should not see padding areas with random leftover
+         * kernel data, so zero the pre- and post-padding.
+         * swiotlb_tbl_map_single() has initialized the bounce buffer proper to
+         * the contents of the original memory buffer.
+         */
+        if (phys != (phys_addr_t)DMA_MAPPING_ERROR && dev_is_untrusted(dev)) {
+                size_t start, virt = (size_t)phys_to_virt(phys);
+
+                /* Pre-padding */
+                start = iova_align_down(iovad, virt);
+                memset((void *)start, 0, virt - start);
+
+                /* Post-padding */
+                start = virt + size;
+                memset((void *)start, 0, iova_align(iovad, start) - start);
+        }
+
+        return phys;
+}
+
 dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
                 unsigned long offset, size_t size, enum dma_data_direction dir,
                 unsigned long attrs)
@@ -1174,42 +1211,14 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
         dma_addr_t iova, dma_mask = dma_get_mask(dev);
 
         /*
-         * If both the physical buffer start address and size are
-         * page aligned, we don't need to use a bounce page.
+         * If both the physical buffer start address and size are page aligned,
+         * we don't need to use a bounce page.
          */
         if (dev_use_swiotlb(dev, size, dir) &&
             iova_offset(iovad, phys | size)) {
-                if (!is_swiotlb_active(dev)) {
-                        dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
-                        return DMA_MAPPING_ERROR;
-                }
-
-                trace_swiotlb_bounced(dev, phys, size);
-
-                phys = swiotlb_tbl_map_single(dev, phys, size,
-                                iova_mask(iovad), dir, attrs);
-
-                if (phys == DMA_MAPPING_ERROR)
+                phys = iommu_dma_map_swiotlb(dev, phys, size, dir, attrs);
+                if (phys == (phys_addr_t)DMA_MAPPING_ERROR)
                         return DMA_MAPPING_ERROR;
-
-                /*
-                 * Untrusted devices should not see padding areas with random
-                 * leftover kernel data, so zero the pre- and post-padding.
-                 * swiotlb_tbl_map_single() has initialized the bounce buffer
-                 * proper to the contents of the original memory buffer.
-                 */
-                if (dev_is_untrusted(dev)) {
-                        size_t start, virt = (size_t)phys_to_virt(phys);
-
-                        /* Pre-padding */
-                        start = iova_align_down(iovad, virt);
-                        memset((void *)start, 0, virt - start);
-
-                        /* Post-padding */
-                        start = virt + size;
-                        memset((void *)start, 0,
-                               iova_align(iovad, start) - start);
-                }
         }
 
         if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
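
One detail worth noting: the helper returns phys_addr_t, while DMA_MAPPING_ERROR is defined in terms of dma_addr_t (~(dma_addr_t)0), so the error sentinel is explicitly cast to phys_addr_t both on the return path and in the comparison. A minimal sketch of the resulting caller contract, reusing the variable names from the updated iommu_dma_map_page() above (any future caller, such as the planned iova range allocation path, would presumably follow the same pattern; this is an illustration, not code from the commit):

        /*
         * Caller-contract sketch, not part of this commit: bounce only
         * when the buffer is misaligned with respect to the IOVA
         * granule, then check for the phys_addr_t-cast error sentinel.
         */
        if (dev_use_swiotlb(dev, size, dir) &&
            iova_offset(iovad, phys | size)) {
                phys = iommu_dma_map_swiotlb(dev, phys, size, dir, attrs);
                if (phys == (phys_addr_t)DMA_MAPPING_ERROR)
                        return DMA_MAPPING_ERROR;
        }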
