
Commit 1f8191a

Merge: igc: Driver Update

Authored and committed by Herton R. Krzesinski
MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/1775
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2104471
Tested: basic functionality, ethtool, connectivity, speed

Signed-off-by: Corinna Vinschen <vinschen@redhat.com>

790835f (Muhammad Husaini Zulkifli) igc: Correct the launchtime offset
068c38a (Thomas Gleixner) net: Remove the obsolte u64_stats_fetch_*_irq() users (drivers).
2c5e5ab (Sasha Neftin) igc: Remove IGC_MDIC_INT_EN definition
8c78c1e (Lorenzo Bianconi) igc: add xdp frags support to ndo_xdp_xmit
6ac0db3 (Sasha Neftin) igc: Remove forced_speed_duplex value
fb24f34 (Sasha Neftin) igc: Remove MSI-X PBA Clear register
a5fd394 (Kurt Kanzenbach) igc: Lift TAPRIO schedule restriction

 drivers/net/ethernet/intel/igc/igc_defines.h |  10 +-
 drivers/net/ethernet/intel/igc/igc_ethtool.c |  12 +-
 drivers/net/ethernet/intel/igc/igc_hw.h      |   2 -
 drivers/net/ethernet/intel/igc/igc_main.c    | 166 ++++++++++++++++++---------
 drivers/net/ethernet/intel/igc/igc_regs.h    |   4 +-
 drivers/net/ethernet/intel/igc/igc_tsn.c     |  30 +++++
 drivers/net/ethernet/intel/igc/igc_tsn.h     |   1 +
 7 files changed, 158 insertions(+), 67 deletions(-)

Approved-by: Stefan Assmann <sassmann@redhat.com>
Approved-by: Ken Cox <jkc@redhat.com>
Approved-by: Jarod Wilson <jarod@redhat.com>
Approved-by: Kamal Heib <kheib@redhat.com>

Signed-off-by: Herton R. Krzesinski <herton@redhat.com>
Parents: a1d64eb + 66f7602


drivers/net/ethernet/intel/igc/igc_defines.h

Lines changed: 9 additions & 1 deletion
@@ -400,6 +400,15 @@
 #define IGC_DTXMXPKTSZ_TSN	0x19 /* 1600 bytes of max TX DMA packet size */
 #define IGC_DTXMXPKTSZ_DEFAULT	0x98 /* 9728-byte Jumbo frames */
 
+/* Transmit Scheduling Latency */
+/* Latency between transmission scheduling (LaunchTime) and the time
+ * the packet is transmitted to the network in nanosecond.
+ */
+#define IGC_TXOFFSET_SPEED_10		0x000034BC
+#define IGC_TXOFFSET_SPEED_100		0x00000578
+#define IGC_TXOFFSET_SPEED_1000		0x0000012C
+#define IGC_TXOFFSET_SPEED_2500		0x00000578
+
 /* Time Sync Interrupt Causes */
 #define IGC_TSICR_SYS_WRAP	BIT(0) /* SYSTIM Wrap around. */
 #define IGC_TSICR_TXTS		BIT(1) /* Transmit Timestamp. */
@@ -610,7 +619,6 @@
 #define IGC_MDIC_OP_WRITE	0x04000000
 #define IGC_MDIC_OP_READ	0x08000000
 #define IGC_MDIC_READY		0x10000000
-#define IGC_MDIC_INT_EN		0x20000000
 #define IGC_MDIC_ERROR		0x40000000
 
 #define IGC_N0_QUEUE -1

drivers/net/ethernet/intel/igc/igc_ethtool.c

Lines changed: 6 additions & 6 deletions
@@ -839,29 +839,29 @@ static void igc_ethtool_get_stats(struct net_device *netdev,
 
 		ring = adapter->tx_ring[j];
 		do {
-			start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
+			start = u64_stats_fetch_begin(&ring->tx_syncp);
 			data[i]   = ring->tx_stats.packets;
 			data[i + 1] = ring->tx_stats.bytes;
 			data[i + 2] = ring->tx_stats.restart_queue;
-		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
+		} while (u64_stats_fetch_retry(&ring->tx_syncp, start));
 		do {
-			start = u64_stats_fetch_begin_irq(&ring->tx_syncp2);
+			start = u64_stats_fetch_begin(&ring->tx_syncp2);
 			restart2  = ring->tx_stats.restart_queue2;
-		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start));
+		} while (u64_stats_fetch_retry(&ring->tx_syncp2, start));
 		data[i + 2] += restart2;
 
 		i += IGC_TX_QUEUE_STATS_LEN;
 	}
 	for (j = 0; j < adapter->num_rx_queues; j++) {
 		ring = adapter->rx_ring[j];
 		do {
-			start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
+			start = u64_stats_fetch_begin(&ring->rx_syncp);
 			data[i] = ring->rx_stats.packets;
 			data[i + 1] = ring->rx_stats.bytes;
 			data[i + 2] = ring->rx_stats.drops;
 			data[i + 3] = ring->rx_stats.csum_err;
 			data[i + 4] = ring->rx_stats.alloc_failed;
-		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
+		} while (u64_stats_fetch_retry(&ring->rx_syncp, start));
 		i += IGC_RX_QUEUE_STATS_LEN;
 	}
 	spin_unlock(&adapter->stats64_lock);
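
The replaced calls above all follow the reader side of the plain u64_stats API that supersedes the *_irq() variants. A minimal kernel-style sketch of that pattern (fragment only; the struct and field names here are hypothetical, not igc code):

#include <linux/u64_stats_sync.h>

struct example_ring_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* Reader side: retry until a consistent snapshot is observed. */
static void example_stats_read(const struct example_ring_stats *s,
			       u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}

On 64-bit kernels these helpers are essentially no-ops; on 32-bit they provide the consistency that a torn 64-bit read would otherwise break, which is why writers still wrap updates in u64_stats_update_begin()/u64_stats_update_end().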

drivers/net/ethernet/intel/igc/igc_hw.h

Lines changed: 0 additions & 2 deletions
@@ -89,8 +89,6 @@ struct igc_mac_info {
 	u32 mta_shadow[MAX_MTA_REG];
 	u16 rar_entry_count;
 
-	u8 forced_speed_duplex;
-
 	bool asf_firmware_present;
 	bool arc_subsystem_valid;

drivers/net/ethernet/intel/igc/igc_main.c

Lines changed: 111 additions & 55 deletions
@@ -2129,65 +2129,102 @@ static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)
 	return ok;
 }
 
-static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer,
-				  struct xdp_frame *xdpf,
-				  struct igc_ring *ring)
-{
-	dma_addr_t dma;
-
-	dma = dma_map_single(ring->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
-	if (dma_mapping_error(ring->dev, dma)) {
-		netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
-		return -ENOMEM;
-	}
-
-	buffer->type = IGC_TX_BUFFER_TYPE_XDP;
-	buffer->xdpf = xdpf;
-	buffer->protocol = 0;
-	buffer->bytecount = xdpf->len;
-	buffer->gso_segs = 1;
-	buffer->time_stamp = jiffies;
-	dma_unmap_len_set(buffer, len, xdpf->len);
-	dma_unmap_addr_set(buffer, dma, dma);
-	return 0;
-}
-
 /* This function requires __netif_tx_lock is held by the caller. */
 static int igc_xdp_init_tx_descriptor(struct igc_ring *ring,
 				      struct xdp_frame *xdpf)
 {
-	struct igc_tx_buffer *buffer;
-	union igc_adv_tx_desc *desc;
-	u32 cmd_type, olinfo_status;
-	int err;
+	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+	u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+	u16 count, index = ring->next_to_use;
+	struct igc_tx_buffer *head = &ring->tx_buffer_info[index];
+	struct igc_tx_buffer *buffer = head;
+	union igc_adv_tx_desc *desc = IGC_TX_DESC(ring, index);
+	u32 olinfo_status, len = xdpf->len, cmd_type;
+	void *data = xdpf->data;
+	u16 i;
 
-	if (!igc_desc_unused(ring))
-		return -EBUSY;
+	count = TXD_USE_COUNT(len);
+	for (i = 0; i < nr_frags; i++)
+		count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i]));
 
-	buffer = &ring->tx_buffer_info[ring->next_to_use];
-	err = igc_xdp_init_tx_buffer(buffer, xdpf, ring);
-	if (err)
-		return err;
+	if (igc_maybe_stop_tx(ring, count + 3)) {
+		/* this is a hard error */
+		return -EBUSY;
+	}
 
-	cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
-		   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
-		   buffer->bytecount;
-	olinfo_status = buffer->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
+	i = 0;
+	head->bytecount = xdp_get_frame_len(xdpf);
+	head->type = IGC_TX_BUFFER_TYPE_XDP;
+	head->gso_segs = 1;
+	head->xdpf = xdpf;
 
-	desc = IGC_TX_DESC(ring, ring->next_to_use);
-	desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+	olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
 	desc->read.olinfo_status = cpu_to_le32(olinfo_status);
-	desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma));
 
-	netdev_tx_sent_queue(txring_txq(ring), buffer->bytecount);
+	for (;;) {
+		dma_addr_t dma;
+
+		dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE);
+		if (dma_mapping_error(ring->dev, dma)) {
+			netdev_err_once(ring->netdev,
+					"Failed to map DMA for TX\n");
+			goto unmap;
+		}
+
+		dma_unmap_len_set(buffer, len, len);
+		dma_unmap_addr_set(buffer, dma, dma);
 
-	buffer->next_to_watch = desc;
+		cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+			   IGC_ADVTXD_DCMD_IFCS | len;
 
-	ring->next_to_use++;
-	if (ring->next_to_use == ring->count)
-		ring->next_to_use = 0;
+		desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+		desc->read.buffer_addr = cpu_to_le64(dma);
+
+		buffer->protocol = 0;
+
+		if (++index == ring->count)
+			index = 0;
+
+		if (i == nr_frags)
+			break;
+
+		buffer = &ring->tx_buffer_info[index];
+		desc = IGC_TX_DESC(ring, index);
+		desc->read.olinfo_status = 0;
+
+		data = skb_frag_address(&sinfo->frags[i]);
+		len = skb_frag_size(&sinfo->frags[i]);
+		i++;
+	}
+	desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD);
+
+	netdev_tx_sent_queue(txring_txq(ring), head->bytecount);
+	/* set the timestamp */
+	head->time_stamp = jiffies;
+	/* set next_to_watch value indicating a packet is present */
+	head->next_to_watch = desc;
+	ring->next_to_use = index;
 
 	return 0;
+
+unmap:
+	for (;;) {
+		buffer = &ring->tx_buffer_info[index];
+		if (dma_unmap_len(buffer, len))
+			dma_unmap_page(ring->dev,
+				       dma_unmap_addr(buffer, dma),
+				       dma_unmap_len(buffer, len),
+				       DMA_TO_DEVICE);
+		dma_unmap_len_set(buffer, len, 0);
+		if (buffer == head)
+			break;
+
+		if (!index)
+			index += ring->count;
+		index--;
+	}
+
+	return -ENOMEM;
 }
 
 static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
@@ -2369,6 +2406,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
 			xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring),
 					 igc_rx_offset(rx_ring) + pkt_offset,
 					 size, true);
+			xdp_buff_clear_frags_flag(&xdp);
 
 			skb = igc_xdp_run_prog(adapter, &xdp);
 		}
@@ -4644,10 +4682,10 @@ void igc_update_stats(struct igc_adapter *adapter)
 		}
 
 		do {
-			start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
+			start = u64_stats_fetch_begin(&ring->rx_syncp);
 			_bytes = ring->rx_stats.bytes;
 			_packets = ring->rx_stats.packets;
-		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
+		} while (u64_stats_fetch_retry(&ring->rx_syncp, start));
 		bytes += _bytes;
 		packets += _packets;
 	}
@@ -4661,10 +4699,10 @@ void igc_update_stats(struct igc_adapter *adapter)
 		struct igc_ring *ring = adapter->tx_ring[i];
 
 		do {
-			start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
+			start = u64_stats_fetch_begin(&ring->tx_syncp);
 			_bytes = ring->tx_stats.bytes;
 			_packets = ring->tx_stats.packets;
-		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
+		} while (u64_stats_fetch_retry(&ring->tx_syncp, start));
 		bytes += _bytes;
 		packets += _packets;
 	}
@@ -5343,6 +5381,13 @@ static void igc_watchdog_task(struct work_struct *work)
 				break;
 			}
 
+			/* Once the launch time has been set on the wire, there
+			 * is a delay before the link speed can be determined
+			 * based on link-up activity. Write into the register
+			 * as soon as we know the correct link speed.
+			 */
+			igc_tsn_adjust_txtime_offset(adapter);
+
 			if (adapter->link_speed != SPEED_1000)
 				goto no_wait;
 
@@ -5812,9 +5857,10 @@ static bool validate_schedule(struct igc_adapter *adapter,
 		return false;
 
 	for (n = 0; n < qopt->num_entries; n++) {
-		const struct tc_taprio_sched_entry *e;
+		const struct tc_taprio_sched_entry *e, *prev;
 		int i;
 
+		prev = n ? &qopt->entries[n - 1] : NULL;
 		e = &qopt->entries[n];
 
 		/* i225 only supports "global" frame preemption
@@ -5827,7 +5873,12 @@
 			if (e->gate_mask & BIT(i))
 				queue_uses[i]++;
 
-			if (queue_uses[i] > 1)
+			/* There are limitations: A single queue cannot be
+			 * opened and closed multiple times per cycle unless the
+			 * gate stays open. Check for it.
+			 */
+			if (queue_uses[i] > 1 &&
+			    !(prev->gate_mask & BIT(i)))
 				return false;
 		}
 	}
@@ -5871,6 +5922,7 @@ static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
 static int igc_save_qbv_schedule(struct igc_adapter *adapter,
 				 struct tc_taprio_qopt_offload *qopt)
 {
+	bool queue_configured[IGC_MAX_TX_QUEUES] = { };
 	u32 start_time = 0, end_time = 0;
 	size_t n;
 
@@ -5886,9 +5938,6 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
 	adapter->cycle_time = qopt->cycle_time;
 	adapter->base_time = qopt->base_time;
 
-	/* FIXME: be a little smarter about cases when the gate for a
-	 * queue stays open for more than one entry.
-	 */
 	for (n = 0; n < qopt->num_entries; n++) {
 		struct tc_taprio_sched_entry *e = &qopt->entries[n];
 		int i;
@@ -5901,8 +5950,15 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
 			if (!(e->gate_mask & BIT(i)))
 				continue;
 
-			ring->start_time = start_time;
+			/* Check whether a queue stays open for more than one
+			 * entry. If so, keep the start and advance the end
+			 * time.
+			 */
+			if (!queue_configured[i])
+				ring->start_time = start_time;
 			ring->end_time = end_time;
+
+			queue_configured[i] = true;
 		}
 
 		start_time += e->interval;
drivers/net/ethernet/intel/igc/igc_regs.h

Lines changed: 1 addition & 3 deletions
@@ -59,9 +59,6 @@
 #define IGC_IVAR_MISC		0x01740  /* IVAR for "other" causes - RW */
 #define IGC_GPIE		0x01514  /* General Purpose Intr Enable - RW */
 
-/* MSI-X Table Register Descriptions */
-#define IGC_PBACL		0x05B68  /* MSIx PBA Clear - R/W 1 to clear */
-
 /* RSS registers */
 #define IGC_MRQC		0x05818 /* Multiple Receive Control - RW */
 
@@ -227,6 +224,7 @@
 /* Transmit Scheduling Registers */
 #define IGC_TQAVCTRL		0x3570
 #define IGC_TXQCTL(_n)		(0x3344 + 0x4 * (_n))
+#define IGC_GTXOFFSET		0x3310
 #define IGC_BASET_L		0x3314
 #define IGC_BASET_H		0x3318
 #define IGC_QBVCYCLET		0x331C

drivers/net/ethernet/intel/igc/igc_tsn.c

Lines changed: 30 additions & 0 deletions
@@ -48,6 +48,35 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter)
 	return new_flags;
 }
 
+void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter)
+{
+	struct igc_hw *hw = &adapter->hw;
+	u16 txoffset;
+
+	if (!is_any_launchtime(adapter))
+		return;
+
+	switch (adapter->link_speed) {
+	case SPEED_10:
+		txoffset = IGC_TXOFFSET_SPEED_10;
+		break;
+	case SPEED_100:
+		txoffset = IGC_TXOFFSET_SPEED_100;
+		break;
+	case SPEED_1000:
+		txoffset = IGC_TXOFFSET_SPEED_1000;
+		break;
+	case SPEED_2500:
+		txoffset = IGC_TXOFFSET_SPEED_2500;
+		break;
+	default:
+		txoffset = 0;
+		break;
+	}
+
+	wr32(IGC_GTXOFFSET, txoffset);
+}
+
 /* Returns the TSN specific registers to their default values after
  * the adapter is reset.
  */
@@ -57,6 +86,7 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
 	u32 tqavctrl;
 	int i;
 
+	wr32(IGC_GTXOFFSET, 0);
 	wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
 	wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT);

drivers/net/ethernet/intel/igc/igc_tsn.h

Lines changed: 1 addition & 0 deletions
@@ -6,5 +6,6 @@
 
 int igc_tsn_offload_apply(struct igc_adapter *adapter);
 int igc_tsn_reset(struct igc_adapter *adapter);
+void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter);
 
 #endif /* _IGC_BASE_H */
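
Taken together with the IGC_TXOFFSET_SPEED_* defines and the IGC_GTXOFFSET register added above, the new igc_tsn_adjust_txtime_offset() helper maps the negotiated link speed to a fixed launchtime offset in nanoseconds. A standalone illustration of that mapping with the constants spelled out in decimal (userspace C for illustration, not driver code):

#include <stdio.h>

#define IGC_TXOFFSET_SPEED_10	0x000034BC	/* 13500 ns */
#define IGC_TXOFFSET_SPEED_100	0x00000578	/*  1400 ns */
#define IGC_TXOFFSET_SPEED_1000	0x0000012C	/*   300 ns */
#define IGC_TXOFFSET_SPEED_2500	0x00000578	/*  1400 ns */

/* Same speed -> offset selection as the driver helper; speeds in Mb/s. */
static unsigned int txtime_offset(unsigned int speed_mbps)
{
	switch (speed_mbps) {
	case 10:   return IGC_TXOFFSET_SPEED_10;
	case 100:  return IGC_TXOFFSET_SPEED_100;
	case 1000: return IGC_TXOFFSET_SPEED_1000;
	case 2500: return IGC_TXOFFSET_SPEED_2500;
	default:   return 0;	/* unknown speed: no offset */
	}
}

int main(void)
{
	const unsigned int speeds[] = { 10, 100, 1000, 2500 };

	for (size_t i = 0; i < sizeof(speeds) / sizeof(speeds[0]); i++)
		printf("%u Mb/s -> %u ns\n", speeds[i], txtime_offset(speeds[i]));
	return 0;
}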
