@@ -1207,9 +1207,9 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
 	unsigned int max_queues_per_port = num_online_cpus();
 	struct gdma_context *gc = pci_get_drvdata(pdev);
 	struct gdma_irq_context *gic;
-	unsigned int max_irqs;
+	unsigned int max_irqs, cpu;
 	int nvec, irq;
-	int err, i, j;
+	int err, i = 0, j;
 
 	if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
 		max_queues_per_port = MANA_MAX_NUM_QUEUES;
@@ -1242,6 +1242,9 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
 		err = request_irq(irq, mana_gd_intr, 0, "mana_intr", gic);
 		if (err)
 			goto free_irq;
+
+		cpu = cpumask_local_spread(i, gc->numa_node);
+		irq_set_affinity_and_hint(irq, cpumask_of(cpu));
 	}
 
 	err = mana_gd_alloc_res_map(nvec, &gc->msix_resource);
@@ -1257,6 +1260,8 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
 	for (j = i - 1; j >= 0; j--) {
 		irq = pci_irq_vector(pdev, j);
 		gic = &gc->irq_contexts[j];
+
+		irq_update_affinity_hint(irq, NULL);
 		free_irq(irq, gic);
 	}
 
@@ -1284,6 +1289,9 @@ static void mana_gd_remove_irqs(struct pci_dev *pdev)
 			continue;
 
 		gic = &gc->irq_contexts[i];
+
+		/* Need to clear the hint before free_irq */
+		irq_update_affinity_hint(irq, NULL);
 		free_irq(irq, gic);
 	}
 
@@ -1382,6 +1390,7 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (!bar0_va)
 		goto free_gc;
 
+	gc->numa_node = dev_to_node(&pdev->dev);
 	gc->is_pf = mana_is_pf(pdev->device);
 	gc->bar0_va = bar0_va;
 	gc->dev = &pdev->dev;