Skip to content

Commit 10c04c9

Browse files
committed
net: mana: Assign interrupts to CPUs based on NUMA nodes
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2168970 commit 71fa688 Author: Saurabh Sengar <ssengar@linux.microsoft.com> Date: Mon Oct 31 23:06:01 2022 -0700 net: mana: Assign interrupts to CPUs based on NUMA nodes In large VMs with multiple NUMA nodes, network performance is usually best if network interrupts are all assigned to the same virtual NUMA node. This patch assigns online CPUs according to a NUMA-aware policy: local CPUs are returned first, followed by non-local ones, then it wraps around. Signed-off-by: Saurabh Sengar <ssengar@linux.microsoft.com> Reviewed-by: Haiyang Zhang <haiyangz@microsoft.com> Link: https://lore.kernel.org/r/1667282761-11547-1-git-send-email-ssengar@linux.microsoft.com Signed-off-by: Paolo Abeni <pabeni@redhat.com> Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
1 parent 486b0d2 commit 10c04c9

File tree

2 files changed

+28
-3
lines changed

2 files changed

+28
-3
lines changed

drivers/net/ethernet/microsoft/mana/gdma.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -353,6 +353,7 @@ struct gdma_context {
353353
void __iomem *shm_base;
354354
void __iomem *db_page_base;
355355
u32 db_page_size;
356+
int numa_node;
356357

357358
/* Shared memory channel (used to bootstrap HWC) */
358359
struct shm_channel shm_channel;

drivers/net/ethernet/microsoft/mana/gdma_main.c

Lines changed: 27 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1208,8 +1208,10 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
12081208
struct gdma_context *gc = pci_get_drvdata(pdev);
12091209
struct gdma_irq_context *gic;
12101210
unsigned int max_irqs;
1211+
u16 *cpus;
1212+
cpumask_var_t req_mask;
12111213
int nvec, irq;
1212-
int err, i, j;
1214+
int err, i = 0, j;
12131215

12141216
if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
12151217
max_queues_per_port = MANA_MAX_NUM_QUEUES;
@@ -1228,21 +1230,39 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
12281230
goto free_irq_vector;
12291231
}
12301232

1233+
if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL)) {
1234+
err = -ENOMEM;
1235+
goto free_irq;
1236+
}
1237+
1238+
cpus = kcalloc(nvec, sizeof(*cpus), GFP_KERNEL);
1239+
if (!cpus) {
1240+
err = -ENOMEM;
1241+
goto free_mask;
1242+
}
1243+
for (i = 0; i < nvec; i++)
1244+
cpus[i] = cpumask_local_spread(i, gc->numa_node);
1245+
12311246
for (i = 0; i < nvec; i++) {
1247+
cpumask_set_cpu(cpus[i], req_mask);
12321248
gic = &gc->irq_contexts[i];
12331249
gic->handler = NULL;
12341250
gic->arg = NULL;
12351251

12361252
irq = pci_irq_vector(pdev, i);
12371253
if (irq < 0) {
12381254
err = irq;
1239-
goto free_irq;
1255+
goto free_mask;
12401256
}
12411257

12421258
err = request_irq(irq, mana_gd_intr, 0, "mana_intr", gic);
12431259
if (err)
1244-
goto free_irq;
1260+
goto free_mask;
1261+
irq_set_affinity_and_hint(irq, req_mask);
1262+
cpumask_clear(req_mask);
12451263
}
1264+
free_cpumask_var(req_mask);
1265+
kfree(cpus);
12461266

12471267
err = mana_gd_alloc_res_map(nvec, &gc->msix_resource);
12481268
if (err)
@@ -1253,6 +1273,9 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
12531273

12541274
return 0;
12551275

1276+
free_mask:
1277+
free_cpumask_var(req_mask);
1278+
kfree(cpus);
12561279
free_irq:
12571280
for (j = i - 1; j >= 0; j--) {
12581281
irq = pci_irq_vector(pdev, j);
@@ -1382,6 +1405,7 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
13821405
if (!bar0_va)
13831406
goto free_gc;
13841407

1408+
gc->numa_node = dev_to_node(&pdev->dev);
13851409
gc->is_pf = mana_is_pf(pdev->device);
13861410
gc->bar0_va = bar0_va;
13871411
gc->dev = &pdev->dev;

0 commit comments

Comments
 (0)