
Commit 0b783c1

igc: add xdp frags support to ndo_xdp_xmit
Author: Lorenzo Bianconi <lorenzo@kernel.org>

Add the capability to map non-linear xdp frames in XDP_TX and
ndo_xdp_xmit callback.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Tested-by: Naama Meir <naamax.meir@linux.intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Link: https://lore.kernel.org/r/20220817173628.109102-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
(cherry picked from commit 8c78c1e)
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2104471
Signed-off-by: Corinna Vinschen <vinschen@redhat.com>
1 parent 699d46c commit 0b783c1

1 file changed: +83 additions, -45 deletions


drivers/net/ethernet/intel/igc/igc_main.c

Lines changed: 83 additions & 45 deletions

@@ -2129,65 +2129,102 @@ static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)
 	return ok;
 }

-static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer,
-				  struct xdp_frame *xdpf,
-				  struct igc_ring *ring)
-{
-	dma_addr_t dma;
-
-	dma = dma_map_single(ring->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
-	if (dma_mapping_error(ring->dev, dma)) {
-		netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
-		return -ENOMEM;
-	}
-
-	buffer->type = IGC_TX_BUFFER_TYPE_XDP;
-	buffer->xdpf = xdpf;
-	buffer->protocol = 0;
-	buffer->bytecount = xdpf->len;
-	buffer->gso_segs = 1;
-	buffer->time_stamp = jiffies;
-	dma_unmap_len_set(buffer, len, xdpf->len);
-	dma_unmap_addr_set(buffer, dma, dma);
-	return 0;
-}
-
 /* This function requires __netif_tx_lock is held by the caller. */
 static int igc_xdp_init_tx_descriptor(struct igc_ring *ring,
 				      struct xdp_frame *xdpf)
 {
-	struct igc_tx_buffer *buffer;
-	union igc_adv_tx_desc *desc;
-	u32 cmd_type, olinfo_status;
-	int err;
+	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+	u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+	u16 count, index = ring->next_to_use;
+	struct igc_tx_buffer *head = &ring->tx_buffer_info[index];
+	struct igc_tx_buffer *buffer = head;
+	union igc_adv_tx_desc *desc = IGC_TX_DESC(ring, index);
+	u32 olinfo_status, len = xdpf->len, cmd_type;
+	void *data = xdpf->data;
+	u16 i;

-	if (!igc_desc_unused(ring))
-		return -EBUSY;
+	count = TXD_USE_COUNT(len);
+	for (i = 0; i < nr_frags; i++)
+		count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i]));

-	buffer = &ring->tx_buffer_info[ring->next_to_use];
-	err = igc_xdp_init_tx_buffer(buffer, xdpf, ring);
-	if (err)
-		return err;
+	if (igc_maybe_stop_tx(ring, count + 3)) {
+		/* this is a hard error */
+		return -EBUSY;
+	}

-	cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
-		   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
-		   buffer->bytecount;
-	olinfo_status = buffer->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
+	i = 0;
+	head->bytecount = xdp_get_frame_len(xdpf);
+	head->type = IGC_TX_BUFFER_TYPE_XDP;
+	head->gso_segs = 1;
+	head->xdpf = xdpf;

-	desc = IGC_TX_DESC(ring, ring->next_to_use);
-	desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+	olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
 	desc->read.olinfo_status = cpu_to_le32(olinfo_status);
-	desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma));

-	netdev_tx_sent_queue(txring_txq(ring), buffer->bytecount);
+	for (;;) {
+		dma_addr_t dma;

-	buffer->next_to_watch = desc;
+		dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE);
+		if (dma_mapping_error(ring->dev, dma)) {
+			netdev_err_once(ring->netdev,
+					"Failed to map DMA for TX\n");
+			goto unmap;
+		}

-	ring->next_to_use++;
-	if (ring->next_to_use == ring->count)
-		ring->next_to_use = 0;
+		dma_unmap_len_set(buffer, len, len);
+		dma_unmap_addr_set(buffer, dma, dma);
+
+		cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+			   IGC_ADVTXD_DCMD_IFCS | len;
+
+		desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+		desc->read.buffer_addr = cpu_to_le64(dma);
+
+		buffer->protocol = 0;
+
+		if (++index == ring->count)
+			index = 0;
+
+		if (i == nr_frags)
+			break;
+
+		buffer = &ring->tx_buffer_info[index];
+		desc = IGC_TX_DESC(ring, index);
+		desc->read.olinfo_status = 0;
+
+		data = skb_frag_address(&sinfo->frags[i]);
+		len = skb_frag_size(&sinfo->frags[i]);
+		i++;
+	}
+	desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD);
+
+	netdev_tx_sent_queue(txring_txq(ring), head->bytecount);
+	/* set the timestamp */
+	head->time_stamp = jiffies;
+	/* set next_to_watch value indicating a packet is present */
+	head->next_to_watch = desc;
+	ring->next_to_use = index;

 	return 0;
+
+unmap:
+	for (;;) {
+		buffer = &ring->tx_buffer_info[index];
+		if (dma_unmap_len(buffer, len))
+			dma_unmap_page(ring->dev,
+				       dma_unmap_addr(buffer, dma),
+				       dma_unmap_len(buffer, len),
+				       DMA_TO_DEVICE);
+		dma_unmap_len_set(buffer, len, 0);
+		if (buffer == head)
+			break;
+
+		if (!index)
+			index += ring->count;
+		index--;
+	}
+
+	return -ENOMEM;
 }

 static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
@@ -2369,6 +2406,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
 		xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring),
 				 igc_rx_offset(rx_ring) + pkt_offset,
 				 size, true);
+		xdp_buff_clear_frags_flag(&xdp);

 		skb = igc_xdp_run_prog(adapter, &xdp);
 	}
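To make the descriptor accounting above easier to follow, here is a minimal standalone C sketch of the budgeting step the new igc_xdp_init_tx_descriptor() performs before touching the ring. It is not driver code: IGC_MAX_DATA_PER_TXD, the linear length and the fragment sizes below are assumed example values; only the TXD_USE_COUNT()-style rounding and the per-fragment accumulation mirror the patch.

/* Standalone sketch (userspace, not kernel code) of the descriptor
 * budgeting used by the patch: one descriptor per IGC_MAX_DATA_PER_TXD
 * sized chunk of the linear area, plus the same rounding per fragment.
 * The constant and the sample lengths are assumptions for illustration.
 */
#include <stdio.h>

#define IGC_MAX_DATA_PER_TXD 16384u	/* assumed per-descriptor data limit */
#define TXD_USE_COUNT(s) (((s) + IGC_MAX_DATA_PER_TXD - 1) / IGC_MAX_DATA_PER_TXD)

int main(void)
{
	unsigned int linear_len = 1500;			/* hypothetical xdpf->len */
	unsigned int frag_size[] = { 4096, 2048 };	/* hypothetical frag sizes */
	unsigned int nr_frags = 2;
	unsigned int count, i;

	/* Budget for the linear part of the frame... */
	count = TXD_USE_COUNT(linear_len);
	/* ...then add the rounded-up cost of every fragment. */
	for (i = 0; i < nr_frags; i++)
		count += TXD_USE_COUNT(frag_size[i]);

	/* The driver checks for count + 3 free descriptors
	 * (igc_maybe_stop_tx) before building the chain; on a DMA mapping
	 * error it walks the chain backwards and unmaps what was mapped.
	 */
	printf("descriptors needed: %u (ring-space check: %u)\n",
	       count, count + 3);
	return 0;
}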
