Skip to content

Commit 5bfbf2c

Browse files
Naveen Mamindlapalli authored and gregkh committed
octeontx2-af: Modify SMQ flush sequence to drop packets
[ Upstream commit 019aba0 ] The current implementation of SMQ flush sequence waits for the packets in the TM pipeline to be transmitted out of the link. This sequence doesn't succeed in HW when there is any issue with link such as lack of link credits, link down or any other traffic that is fully occupying the link bandwidth (QoS). This patch modifies the SMQ flush sequence to drop the packets after TL1 level (SQM) instead of polling for the packets to be sent out of RPM/CGX link. Fixes: 5d9b976 ("octeontx2-af: Support fixed transmit scheduler topology") Signed-off-by: Naveen Mamindlapalli <naveenm@marvell.com> Reviewed-by: Sunil Kovvuri Goutham <sgoutham@marvell.com> Link: https://patch.msgid.link/20240906045838.1620308-1-naveenm@marvell.com Signed-off-by: Paolo Abeni <pabeni@redhat.com> Signed-off-by: Sasha Levin <sashal@kernel.org>
1 parent 7ae890e commit 5bfbf2c

File tree

2 files changed

+48
-14
lines changed

2 files changed

+48
-14
lines changed

drivers/net/ethernet/marvell/octeontx2/af/rvu.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -290,6 +290,7 @@ struct nix_mark_format {
290290

291291
/* smq(flush) to tl1 cir/pir info */
292292
struct nix_smq_tree_ctx {
293+
u16 schq;
293294
u64 cir_off;
294295
u64 cir_val;
295296
u64 pir_off;
@@ -299,8 +300,6 @@ struct nix_smq_tree_ctx {
299300
/* smq flush context */
300301
struct nix_smq_flush_ctx {
301302
int smq;
302-
u16 tl1_schq;
303-
u16 tl2_schq;
304303
struct nix_smq_tree_ctx smq_tree_ctx[NIX_TXSCH_LVL_CNT];
305304
};
306305

drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c

Lines changed: 47 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -2146,14 +2146,13 @@ static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
21462146
schq = smq;
21472147
for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
21482148
smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
2149+
smq_tree_ctx->schq = schq;
21492150
if (lvl == NIX_TXSCH_LVL_TL1) {
2150-
smq_flush_ctx->tl1_schq = schq;
21512151
smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
21522152
smq_tree_ctx->pir_off = 0;
21532153
smq_tree_ctx->pir_val = 0;
21542154
parent_off = 0;
21552155
} else if (lvl == NIX_TXSCH_LVL_TL2) {
2156-
smq_flush_ctx->tl2_schq = schq;
21572156
smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
21582157
smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
21592158
parent_off = NIX_AF_TL2X_PARENT(schq);
@@ -2188,25 +2187,26 @@ static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
21882187
{
21892188
struct nix_txsch *txsch;
21902189
struct nix_hw *nix_hw;
2190+
int tl2, tl2_schq;
21912191
u64 regoff;
2192-
int tl2;
21932192

21942193
nix_hw = get_nix_hw(rvu->hw, blkaddr);
21952194
if (!nix_hw)
21962195
return;
21972196

21982197
/* loop through all TL2s with matching PF_FUNC */
21992198
txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
2199+
tl2_schq = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL2].schq;
22002200
for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
22012201
/* skip the smq(flush) TL2 */
2202-
if (tl2 == smq_flush_ctx->tl2_schq)
2202+
if (tl2 == tl2_schq)
22032203
continue;
22042204
/* skip unused TL2s */
22052205
if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
22062206
continue;
22072207
/* skip if PF_FUNC doesn't match */
22082208
if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) !=
2209-
(TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq] &
2209+
(TXSCH_MAP_FUNC(txsch->pfvf_map[tl2_schq] &
22102210
~RVU_PFVF_FUNC_MASK)))
22112211
continue;
22122212
/* enable/disable XOFF */
@@ -2248,10 +2248,12 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
22482248
int smq, u16 pcifunc, int nixlf)
22492249
{
22502250
struct nix_smq_flush_ctx *smq_flush_ctx;
2251+
int err, restore_tx_en = 0, i;
22512252
int pf = rvu_get_pf(pcifunc);
22522253
u8 cgx_id = 0, lmac_id = 0;
2253-
int err, restore_tx_en = 0;
2254-
u64 cfg;
2254+
u16 tl2_tl3_link_schq;
2255+
u8 link, link_level;
2256+
u64 cfg, bmap = 0;
22552257

22562258
if (!is_rvu_otx2(rvu)) {
22572259
/* Skip SMQ flush if pkt count is zero */
@@ -2275,16 +2277,38 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
22752277
nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
22762278
nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);
22772279

2278-
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
2279-
/* Do SMQ flush and set enqueue xoff */
2280-
cfg |= BIT_ULL(50) | BIT_ULL(49);
2281-
rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
2282-
22832280
/* Disable backpressure from physical link,
22842281
* otherwise SMQ flush may stall.
22852282
*/
22862283
rvu_cgx_enadis_rx_bp(rvu, pf, false);
22872284

2285+
link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
2286+
NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
2287+
tl2_tl3_link_schq = smq_flush_ctx->smq_tree_ctx[link_level].schq;
2288+
link = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL1].schq;
2289+
2290+
/* SMQ set enqueue xoff */
2291+
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
2292+
cfg |= BIT_ULL(50);
2293+
rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
2294+
2295+
/* Clear all NIX_AF_TL3_TL2_LINK_CFG[ENA] for the TL3/TL2 queue */
2296+
for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
2297+
cfg = rvu_read64(rvu, blkaddr,
2298+
NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
2299+
if (!(cfg & BIT_ULL(12)))
2300+
continue;
2301+
bmap |= (1 << i);
2302+
cfg &= ~BIT_ULL(12);
2303+
rvu_write64(rvu, blkaddr,
2304+
NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
2305+
}
2306+
2307+
/* Do SMQ flush and set enqueue xoff */
2308+
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
2309+
cfg |= BIT_ULL(50) | BIT_ULL(49);
2310+
rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
2311+
22882312
/* Wait for flush to complete */
22892313
err = rvu_poll_reg(rvu, blkaddr,
22902314
NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
@@ -2293,6 +2317,17 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
22932317
"NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
22942318
nixlf, smq);
22952319

2320+
/* Set NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */
2321+
for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
2322+
if (!(bmap & (1 << i)))
2323+
continue;
2324+
cfg = rvu_read64(rvu, blkaddr,
2325+
NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
2326+
cfg |= BIT_ULL(12);
2327+
rvu_write64(rvu, blkaddr,
2328+
NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
2329+
}
2330+
22962331
/* clear XOFF on TL2s */
22972332
nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
22982333
nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);

0 commit comments

Comments
 (0)