Commit 8285d3f

Committed by: Shruti Parab

bnxt_en: Extend queue stop/start for TX rings
JIRA: https://issues.redhat.com/browse/RHEL-76565

commit fe96d71
Author: Somnath Kotur <somnath.kotur@broadcom.com>
Date: Wed Feb 12 17:12:38 2025 -0800

    bnxt_en: Extend queue stop/start for TX rings

    In order to use queue_stop/queue_start to support the new Steering Tags,
    we need to free the TX ring and TX completion ring if it is a combined
    channel with TX/RX sharing the same NAPI.  Otherwise TX completions will
    not have the updated Steering Tag.  If TPH is not enabled, we just stop
    the TX ring without freeing the TX/TX cmpl rings.  With that we can now
    add napi_disable() and napi_enable() during queue_stop()/queue_start().
    This will guarantee that NAPI will stop processing the completion
    entries in case there are additional pending entries in the completion
    rings after queue_stop().

    There could be some NQEs sitting unprocessed while NAPI is disabled,
    thereby leaving the NQ unarmed.  Explicitly re-arm the NQ after
    napi_enable() in queue_start() so that NAPI will resume properly.

    Error handling in bnxt_queue_start() requires a reset.  If a TX ring
    cannot be allocated or initialized properly, it will cause a TX timeout.
    The reset will also free any partially allocated rings.  We don't expect
    to hit this error path, because re-allocating previously reserved and
    allocated rings with the same parameters should never fail.

    Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
    Reviewed-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
    Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
    Signed-off-by: Michael Chan <michael.chan@broadcom.com>
    Link: https://patch.msgid.link/20250213011240.1640031-11-michael.chan@broadcom.com
    Signed-off-by: Jakub Kicinski <kuba@kernel.org>

Signed-off-by: Shruti Parab <shruti.parab@broadcom.com>
1 parent e4afed6 commit 8285d3f
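
For orientation before the diff, here is a condensed sketch of the ordering this patch establishes in queue_stop()/queue_start() for a combined TX/RX channel. It is distilled from the hunks below, is not compilable on its own, and omits the RX ring alloc/free details:

	/* queue_stop(idx), simplified from the diff below */
	bnxt_hwrm_rx_ring_free(bp, rxr, false);		/* free RX rings first */
	bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
		bnxt_tx_queue_stop(bp, idx);		/* stop TX; free TX/TX cmpl rings only if TPH */
	napi_disable(&bnapi->napi);			/* only after ring free: the HWRM_RING_FREE
							 * completion is handled in NAPI
							 */
	if (bp->tph_mode)
		bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
	bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);

	/* queue_start(idx), simplified */
	/* ... re-allocate RX (and, with TPH, per-ring completion) rings ... */
	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
		rc = bnxt_tx_queue_start(bp, idx);	/* with TPH, re-allocate TX/TX cmpl rings */
	napi_enable(&bnapi->napi);
	bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);	/* explicitly re-arm the NQ */
	/* on any failure: netdev_err() then bnxt_reset_task(bp, true) */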

drivers/net/ethernet/broadcom/bnxt/bnxt.c

Lines changed: 110 additions & 9 deletions
@@ -11259,6 +11259,78 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
 	return 0;
 }
 
+static void bnxt_tx_queue_stop(struct bnxt *bp, int idx)
+{
+	struct bnxt_tx_ring_info *txr;
+	struct netdev_queue *txq;
+	struct bnxt_napi *bnapi;
+	int i;
+
+	bnapi = bp->bnapi[idx];
+	bnxt_for_each_napi_tx(i, bnapi, txr) {
+		WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
+		synchronize_net();
+
+		if (!(bnapi->flags & BNXT_NAPI_FLAG_XDP)) {
+			txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
+			if (txq) {
+				__netif_tx_lock_bh(txq);
+				netif_tx_stop_queue(txq);
+				__netif_tx_unlock_bh(txq);
+			}
+		}
+
+		if (!bp->tph_mode)
+			continue;
+
+		bnxt_hwrm_tx_ring_free(bp, txr, true);
+		bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
+		bnxt_free_one_tx_ring_skbs(bp, txr, txr->txq_index);
+		bnxt_clear_one_cp_ring(bp, txr->tx_cpr);
+	}
+}
+
+static int bnxt_tx_queue_start(struct bnxt *bp, int idx)
+{
+	struct bnxt_tx_ring_info *txr;
+	struct netdev_queue *txq;
+	struct bnxt_napi *bnapi;
+	int rc, i;
+
+	bnapi = bp->bnapi[idx];
+	/* All rings have been reserved and previously allocated.
+	 * Reallocating with the same parameters should never fail.
+	 */
+	bnxt_for_each_napi_tx(i, bnapi, txr) {
+		if (!bp->tph_mode)
+			goto start_tx;
+
+		rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
+		if (rc)
+			return rc;
+
+		rc = bnxt_hwrm_tx_ring_alloc(bp, txr, false);
+		if (rc)
+			return rc;
+
+		txr->tx_prod = 0;
+		txr->tx_cons = 0;
+		txr->tx_hw_cons = 0;
+start_tx:
+		WRITE_ONCE(txr->dev_state, 0);
+		synchronize_net();
+
+		if (bnapi->flags & BNXT_NAPI_FLAG_XDP)
+			continue;
+
+		txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
+		if (txq)
+			netif_tx_start_queue(txq);
+	}
+
+	return 0;
+}
+
 static void bnxt_free_irq(struct bnxt *bp)
 {
 	struct bnxt_irq *irq;
@@ -15621,7 +15693,9 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
 {
 	struct bnxt *bp = netdev_priv(dev);
 	struct bnxt_rx_ring_info *rxr, *clone;
+	struct bnxt_cp_ring_info *cpr;
 	struct bnxt_vnic_info *vnic;
+	struct bnxt_napi *bnapi;
 	int i, rc;
 
 	rxr = &bp->rx_ring[idx];
@@ -15639,27 +15713,39 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
 
 	bnxt_copy_rx_ring(bp, rxr, clone);
 
+	bnapi = rxr->bnapi;
+	cpr = &bnapi->cp_ring;
+
 	/* All rings have been reserved and previously allocated.
 	 * Reallocating with the same parameters should never fail.
 	 */
 	rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
 	if (rc)
-		return rc;
+		goto err_reset;
 
 	if (bp->tph_mode) {
 		rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
 		if (rc)
-			goto err_free_hwrm_rx_ring;
+			goto err_reset;
 	}
 
 	rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
 	if (rc)
-		goto err_free_hwrm_cp_ring;
+		goto err_reset;
 
 	bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
 		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
 
+	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
+		rc = bnxt_tx_queue_start(bp, idx);
+		if (rc)
+			goto err_reset;
+	}
+
+	napi_enable(&bnapi->napi);
+	bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
+
 	for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
 		vnic = &bp->vnic_info[i];
 
@@ -15676,19 +15762,22 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
 
 	return 0;
 
-err_free_hwrm_cp_ring:
-	if (bp->tph_mode)
-		bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
-err_free_hwrm_rx_ring:
-	bnxt_hwrm_rx_ring_free(bp, rxr, false);
+err_reset:
+	netdev_err(bp->dev, "Unexpected HWRM error during queue start rc: %d\n",
+		   rc);
+	napi_enable(&bnapi->napi);
+	bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
+	bnxt_reset_task(bp, true);
 	return rc;
 }
 
 static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
 {
 	struct bnxt *bp = netdev_priv(dev);
 	struct bnxt_rx_ring_info *rxr;
+	struct bnxt_cp_ring_info *cpr;
 	struct bnxt_vnic_info *vnic;
+	struct bnxt_napi *bnapi;
 	int i;
 
 	for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
@@ -15700,17 +15789,29 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
 	/* Make sure NAPI sees that the VNIC is disabled */
 	synchronize_net();
 	rxr = &bp->rx_ring[idx];
-	cancel_work_sync(&rxr->bnapi->cp_ring.dim.work);
+	bnapi = rxr->bnapi;
+	cpr = &bnapi->cp_ring;
+	cancel_work_sync(&cpr->dim.work);
 	bnxt_hwrm_rx_ring_free(bp, rxr, false);
 	bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
 	page_pool_disable_direct_recycling(rxr->page_pool);
 	if (bnxt_separate_head_pool())
 		page_pool_disable_direct_recycling(rxr->head_pool);
 
+	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+		bnxt_tx_queue_stop(bp, idx);
+
+	/* Disable NAPI now after freeing the rings because HWRM_RING_FREE
+	 * completion is handled in NAPI to guarantee no more DMA on that ring
+	 * after seeing the completion.
+	 */
+	napi_disable(&bnapi->napi);
+
 	if (bp->tph_mode) {
 		bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
 		bnxt_clear_one_cp_ring(bp, rxr->rx_cpr);
 	}
+	bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
 
 	memcpy(qmem, rxr, sizeof(*rxr));
 	bnxt_init_rx_ring_struct(bp, qmem);
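
bnxt_queue_start() and bnxt_queue_stop() above are the driver's per-queue restart callbacks. This patch does not touch their registration, but for context, here is a minimal sketch of how callbacks with these signatures are typically wired up through the net core's queue management ops. The struct layout follows netdev_queue_mgmt_ops from include/net/netdev_queues.h; the bnxt_queue_mem_alloc/bnxt_queue_mem_free helpers shown are assumed to exist elsewhere in the driver and are not part of this diff:

	/* Sketch only: registration of per-queue stop/start callbacks.
	 * The mem_alloc/mem_free helpers are assumed pre-existing helpers,
	 * not introduced by this patch.
	 */
	static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = {
		.ndo_queue_mem_size	= sizeof(struct bnxt_rx_ring_info),
		.ndo_queue_mem_alloc	= bnxt_queue_mem_alloc,
		.ndo_queue_mem_free	= bnxt_queue_mem_free,
		.ndo_queue_start	= bnxt_queue_start,
		.ndo_queue_stop		= bnxt_queue_stop,
	};

	/* During probe, the device advertises these ops so the core can
	 * restart an individual queue without a full ifdown/ifup:
	 *	dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
	 */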
