
Commit 38433a6

schwesinger authored and vinodkoul committed
dmaengine: xilinx_dma: Support descriptor setup from dma_vecs
The DMAEngine provides an interface for obtaining DMA transaction descriptors from an array of scatter-gather buffers represented by struct dma_vec. This interface is used in the DMABUF API of the IIO framework [1][2].

To enable DMABUF support through the IIO framework for the Xilinx DMA, implement the .device_prep_peripheral_dma_vec() callback of struct dma_device in the driver.

[1]: 7a86d46 ("iio: buffer-dmaengine: Support new DMABUF based userspace API")
[2]: 5878853 ("dmaengine: Add API function dmaengine_prep_peripheral_dma_vec()")

Signed-off-by: Folker Schwesinger <dev@folker-schwesinger.de>
Reviewed-by: Suraj Gupta <suraj.gupta2@amd.com>
Reviewed-by: Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
Link: https://lore.kernel.org/r/DCCKQLKOZC06.2H6LJ8RJQJNV2@folker-schwesinger.de
Signed-off-by: Vinod Koul <vkoul@kernel.org>
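For orientation, here is a minimal, hypothetical client-side sketch (not part of this commit) of how a peripheral driver might hand an array of struct dma_vec to the engine through dmaengine_prep_peripheral_dma_vec(). The function name example_submit_dma_vecs, the channel, the buffer addresses, and the length are placeholders, and the buffers are assumed to be DMA-mapped already.

#include <linux/dmaengine.h>

/* Hypothetical example; error handling trimmed to the essentials. */
static int example_submit_dma_vecs(struct dma_chan *chan,
				   dma_addr_t buf0, dma_addr_t buf1,
				   size_t len)
{
	/* Two pre-mapped buffers, described as DMA vectors */
	struct dma_vec vecs[] = {
		{ .addr = buf0, .len = len },
		{ .addr = buf1, .len = len },
	};
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_peripheral_dma_vec(chan, vecs, ARRAY_SIZE(vecs),
						 DMA_MEM_TO_DEV,
						 DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	/* Queue the descriptor and kick off the transfer */
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}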
1 parent d9a3e99 commit 38433a6

File tree

1 file changed: +94 −0 lines changed


drivers/dma/xilinx/xilinx_dma.c

Lines changed: 94 additions & 0 deletions
@@ -2172,6 +2172,99 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
 	return NULL;
 }
 
+/**
+ * xilinx_dma_prep_peripheral_dma_vec - prepare descriptors for a DMA_SLAVE
+ *	transaction from DMA vectors
+ * @dchan: DMA channel
+ * @vecs: Array of DMA vectors that should be transferred
+ * @nb: number of entries in @vecs
+ * @direction: DMA direction
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *xilinx_dma_prep_peripheral_dma_vec(
+	struct dma_chan *dchan, const struct dma_vec *vecs, size_t nb,
+	enum dma_transfer_direction direction, unsigned long flags)
+{
+	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+	struct xilinx_dma_tx_descriptor *desc;
+	struct xilinx_axidma_tx_segment *segment, *head, *prev = NULL;
+	size_t copy;
+	size_t sg_used;
+	unsigned int i;
+
+	if (!is_slave_direction(direction) || direction != chan->direction)
+		return NULL;
+
+	desc = xilinx_dma_alloc_tx_descriptor(chan);
+	if (!desc)
+		return NULL;
+
+	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+	/* Build transactions using information from DMA vectors */
+	for (i = 0; i < nb; i++) {
+		sg_used = 0;
+
+		/* Loop until the entire dma_vec entry is used */
+		while (sg_used < vecs[i].len) {
+			struct xilinx_axidma_desc_hw *hw;
+
+			/* Get a free segment */
+			segment = xilinx_axidma_alloc_tx_segment(chan);
+			if (!segment)
+				goto error;
+
+			/*
+			 * Calculate the maximum number of bytes to transfer,
+			 * making sure it is less than the hw limit
+			 */
+			copy = xilinx_dma_calc_copysize(chan, vecs[i].len,
+							sg_used);
+			hw = &segment->hw;
+
+			/* Fill in the descriptor */
+			xilinx_axidma_buf(chan, hw, vecs[i].addr, sg_used, 0);
+			hw->control = copy;
+
+			if (prev)
+				prev->hw.next_desc = segment->phys;
+
+			prev = segment;
+			sg_used += copy;
+
+			/*
+			 * Insert the segment into the descriptor segments
+			 * list.
+			 */
+			list_add_tail(&segment->node, &desc->segments);
+		}
+	}
+
+	head = list_first_entry(&desc->segments, struct xilinx_axidma_tx_segment, node);
+	desc->async_tx.phys = head->phys;
+
+	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
+	if (chan->direction == DMA_MEM_TO_DEV) {
+		segment->hw.control |= XILINX_DMA_BD_SOP;
+		segment = list_last_entry(&desc->segments,
+					  struct xilinx_axidma_tx_segment,
+					  node);
+		segment->hw.control |= XILINX_DMA_BD_EOP;
+	}
+
+	if (chan->xdev->has_axistream_connected)
+		desc->async_tx.metadata_ops = &xilinx_dma_metadata_ops;
+
+	return &desc->async_tx;
+
+error:
+	xilinx_dma_free_tx_descriptor(chan, desc);
+	return NULL;
+}
+
 /**
  * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
  * @dchan: DMA channel
@@ -3180,6 +3273,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 	xdev->common.device_config = xilinx_dma_device_config;
 	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
 		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
+		xdev->common.device_prep_peripheral_dma_vec = xilinx_dma_prep_peripheral_dma_vec;
 		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
 		xdev->common.device_prep_dma_cyclic =
 					xilinx_dma_prep_dma_cyclic;
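
For context on how the probe-time registration above is reached: clients do not call the driver callback directly but go through the dmaengine core wrapper added in [2]. A paraphrased sketch of that dispatch follows (see include/linux/dmaengine.h; treat the exact body as approximate, not a verbatim quote):

static inline struct dma_async_tx_descriptor *dmaengine_prep_peripheral_dma_vec(
	struct dma_chan *chan, const struct dma_vec *vecs, size_t nents,
	enum dma_transfer_direction dir, unsigned long flags)
{
	/* Engines that do not implement the hook simply return NULL here. */
	if (!chan || !chan->device ||
	    !chan->device->device_prep_peripheral_dma_vec)
		return NULL;

	/* For AXI DMA channels this now resolves to
	 * xilinx_dma_prep_peripheral_dma_vec() registered above. */
	return chan->device->device_prep_peripheral_dma_vec(chan, vecs, nents,
							    dir, flags);
}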
