net: ibmveth: Reset the adapter when unexpected states are detected

jira LE-4649
Rebuild_History Non-Buildable kernel-5.14.0-570.60.1.el9_6
commit-author Dave Marquardt <davemarq@linux.ibm.com>
commit 2c91e2319ed95f9b7608c9ac2ebd1a070918f1fc
Empty-Commit: Cherry-Pick Conflicts during history rebuild.
Will be included in final tarball splat. Ref for failed cherry-pick at:
ciq/ciq_backports/kernel-5.14.0-570.60.1.el9_6/2c91e231.failed

Reset the adapter through the new function ibmveth_reset, called in
WARN_ON situations. Removed a conflicting and unneeded forward
declaration.
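
In outline, the fix replaces fatal BUG_ON() checks with recoverable
WARN_ON() checks that schedule a workqueue item, whose handler bounces
the interface under the RTNL lock. A condensed sketch of that pattern
(all names are taken from the diff below; NAPI handling and the
driver's other error paths are elided):

    /* Worker body: recover by closing and reopening the device.
     * Must not run in a context that already holds rtnl_lock or
     * from the poll routine itself.
     */
    static void ibmveth_reset(struct work_struct *w)
    {
            struct ibmveth_adapter *adapter =
                    container_of(w, struct ibmveth_adapter, work);

            rtnl_lock();
            dev_close(adapter->netdev);
            dev_open(adapter->netdev, NULL);
            rtnl_unlock();
    }

    /* Hot paths then schedule the reset instead of crashing: */
    if (WARN_ON(index == IBM_VETH_INVALID_MAP)) {
            schedule_work(&adapter->work);  /* INIT_WORK()ed in probe */
            goto bad_index_failure;
    }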

Signed-off-by: Dave Marquardt <davemarq@linux.ibm.com>
Link: https://patch.msgid.link/20250501194944.283729-3-davemarq@linux.ibm.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

(cherry picked from commit 2c91e2319ed95f9b7608c9ac2ebd1a070918f1fc)
Signed-off-by: Jonathan Maple <jmaple@ciq.com>

# Conflicts:
#	drivers/net/ethernet/ibm/ibmveth.h
diff --cc drivers/net/ethernet/ibm/ibmveth.h
index 8468e2c59d7a,b0a2460ec9f9..000000000000
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@@ -134,38 -134,39 +134,58 @@@ struct ibmveth_rx_q
  };
  
  struct ibmveth_adapter {
++<<<<<<< HEAD
 +	struct vio_dev *vdev;
 +	struct net_device *netdev;
 +	struct napi_struct napi;
 +	unsigned int mcastFilterSize;
 +	void * buffer_list_addr;
 +	void * filter_list_addr;
 +	void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];
 +	unsigned int tx_ltb_size;
 +	dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];
 +	dma_addr_t buffer_list_dma;
 +	dma_addr_t filter_list_dma;
 +	struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
 +	struct ibmveth_rx_q rx_queue;
 +	int rx_csum;
 +	int large_send;
 +	bool is_active_trunk;
++=======
+ 	struct vio_dev *vdev;
+ 	struct net_device *netdev;
+ 	struct napi_struct napi;
+ 	struct work_struct work;
+ 	unsigned int mcastFilterSize;
+ 	void *buffer_list_addr;
+ 	void *filter_list_addr;
+ 	void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];
+ 	unsigned int tx_ltb_size;
+ 	dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];
+ 	dma_addr_t buffer_list_dma;
+ 	dma_addr_t filter_list_dma;
+ 	struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
+ 	struct ibmveth_rx_q rx_queue;
+ 	int rx_csum;
+ 	int large_send;
+ 	bool is_active_trunk;
 -
 -	u64 fw_ipv6_csum_support;
 -	u64 fw_ipv4_csum_support;
 -	u64 fw_large_send_support;
 -	/* adapter specific stats */
 -	u64 replenish_task_cycles;
 -	u64 replenish_no_mem;
 -	u64 replenish_add_buff_failure;
 -	u64 replenish_add_buff_success;
 -	u64 rx_invalid_buffer;
 -	u64 rx_no_buffer;
 -	u64 tx_map_failed;
 -	u64 tx_send_failed;
 -	u64 tx_large_packets;
 -	u64 rx_large_packets;
 -	/* Ethtool settings */
++>>>>>>> 2c91e2319ed9 (net: ibmveth: Reset the adapter when unexpected states are detected)
 +
 +	u64 fw_ipv6_csum_support;
 +	u64 fw_ipv4_csum_support;
 +	u64 fw_large_send_support;
 +	/* adapter specific stats */
 +	u64 replenish_task_cycles;
 +	u64 replenish_no_mem;
 +	u64 replenish_add_buff_failure;
 +	u64 replenish_add_buff_success;
 +	u64 rx_invalid_buffer;
 +	u64 rx_no_buffer;
 +	u64 tx_map_failed;
 +	u64 tx_send_failed;
 +	u64 tx_large_packets;
 +	u64 rx_large_packets;
 +	/* Ethtool settings */
  	u8 duplex;
  	u32 speed;
  };
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 04192190beba..cff494739bc9 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -39,8 +39,6 @@
 #include "ibmveth.h"
 
 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
-static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
-				       bool reuse);
 static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
 
 static struct kobj_type ktype_veth_pool;
@@ -231,7 +229,10 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
 		index = pool->free_map[free_index];
 		skb = NULL;
 
-		BUG_ON(index == IBM_VETH_INVALID_MAP);
+		if (WARN_ON(index == IBM_VETH_INVALID_MAP)) {
+			schedule_work(&adapter->work);
+			goto bad_index_failure;
+		}
 
 		/* are we allocating a new buffer or recycling an old one */
 		if (pool->skbuff[index])
@@ -300,6 +301,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
 				 DMA_FROM_DEVICE);
 	dev_kfree_skb_any(pool->skbuff[index]);
 	pool->skbuff[index] = NULL;
+bad_index_failure:
 	adapter->replenish_add_buff_failure++;
 
 	mb();
@@ -370,20 +372,36 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
 	}
 }
 
-/* remove a buffer from a pool */
-static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
-					    u64 correlator, bool reuse)
+/**
+ * ibmveth_remove_buffer_from_pool - remove a buffer from a pool
+ * @adapter: adapter instance
+ * @correlator: identifies pool and index
+ * @reuse: whether to reuse buffer
+ *
+ * Return:
+ * * %0 - success
+ * * %-EINVAL - correlator maps to pool or index out of range
+ * * %-EFAULT - pool and index map to null skb
+ */
+static int ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
+					   u64 correlator, bool reuse)
 {
 	unsigned int pool = correlator >> 32;
 	unsigned int index = correlator & 0xffffffffUL;
 	unsigned int free_index;
 	struct sk_buff *skb;
 
-	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
-	BUG_ON(index >= adapter->rx_buff_pool[pool].size);
+	if (WARN_ON(pool >= IBMVETH_NUM_BUFF_POOLS) ||
+	    WARN_ON(index >= adapter->rx_buff_pool[pool].size)) {
+		schedule_work(&adapter->work);
+		return -EINVAL;
+	}
 
 	skb = adapter->rx_buff_pool[pool].skbuff[index];
-	BUG_ON(skb == NULL);
+	if (WARN_ON(!skb)) {
+		schedule_work(&adapter->work);
+		return -EFAULT;
+	}
 
 	/* if we are going to reuse the buffer then keep the pointers around
 	 * but mark index as available. replenish will see the skb pointer and
@@ -411,6 +429,8 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
 	mb();
 
 	atomic_dec(&(adapter->rx_buff_pool[pool].available));
+
+	return 0;
 }
 
 /* get the current buffer on the rx queue */
@@ -420,24 +440,44 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada
 	unsigned int pool = correlator >> 32;
 	unsigned int index = correlator & 0xffffffffUL;
 
-	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
-	BUG_ON(index >= adapter->rx_buff_pool[pool].size);
+	if (WARN_ON(pool >= IBMVETH_NUM_BUFF_POOLS) ||
+	    WARN_ON(index >= adapter->rx_buff_pool[pool].size)) {
+		schedule_work(&adapter->work);
+		return NULL;
+	}
 
 	return adapter->rx_buff_pool[pool].skbuff[index];
 }
 
-static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
-				       bool reuse)
+/**
+ * ibmveth_rxq_harvest_buffer - Harvest buffer from pool
+ *
+ * @adapter: pointer to adapter
+ * @reuse: whether to reuse buffer
+ *
+ * Context: called from ibmveth_poll
+ *
+ * Return:
+ * * %0 - success
+ * * other - non-zero return from ibmveth_remove_buffer_from_pool
+ */
+static int ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
+				      bool reuse)
 {
 	u64 cor;
+	int rc;
 
 	cor = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
-	ibmveth_remove_buffer_from_pool(adapter, cor, reuse);
+	rc = ibmveth_remove_buffer_from_pool(adapter, cor, reuse);
+	if (unlikely(rc))
+		return rc;
 
 	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
 		adapter->rx_queue.index = 0;
 		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
 	}
+
+	return 0;
 }
 
 static void ibmveth_free_tx_ltb(struct ibmveth_adapter *adapter, int idx)
@@ -709,6 +749,35 @@ static int ibmveth_close(struct net_device *netdev)
 	return 0;
 }
 
+/**
+ * ibmveth_reset - Handle scheduled reset work
+ *
+ * @w: pointer to work_struct embedded in adapter structure
+ *
+ * Context: This routine acquires rtnl_mutex and disables its NAPI through
+ *	ibmveth_close. It can't be called directly in a context that has
+ *	already acquired rtnl_mutex or disabled its NAPI, or directly from
+ *	a poll routine.
+ *
+ * Return: void
+ */
+static void ibmveth_reset(struct work_struct *w)
+{
+	struct ibmveth_adapter *adapter = container_of(w, struct ibmveth_adapter, work);
+	struct net_device *netdev = adapter->netdev;
+
+	netdev_dbg(netdev, "reset starting\n");
+
+	rtnl_lock();
+
+	dev_close(adapter->netdev);
+	dev_open(adapter->netdev, NULL);
+
+	rtnl_unlock();
+
+	netdev_dbg(netdev, "reset complete\n");
+}
+
 static int ibmveth_set_link_ksettings(struct net_device *dev,
 				      const struct ethtool_link_ksettings *cmd)
 {
@@ -1324,7 +1393,8 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
 			wmb(); /* suggested by larson1 */
 			adapter->rx_invalid_buffer++;
 			netdev_dbg(netdev, "recycling invalid buffer\n");
-			ibmveth_rxq_harvest_buffer(adapter, true);
+			if (unlikely(ibmveth_rxq_harvest_buffer(adapter, true)))
+				break;
 		} else {
 			struct sk_buff *skb, *new_skb;
 			int length = ibmveth_rxq_frame_length(adapter);
@@ -1334,6 +1404,8 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
 			__sum16 iph_check = 0;
 
 			skb = ibmveth_rxq_get_buffer(adapter);
+			if (unlikely(!skb))
+				break;
 
 			/* if the large packet bit is set in the rx queue
 			 * descriptor, the mss will be written by PHYP eight
@@ -1357,10 +1429,12 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
 				if (rx_flush)
 					ibmveth_flush_buffer(skb->data,
 							     length + offset);
-				ibmveth_rxq_harvest_buffer(adapter, true);
+				if (unlikely(ibmveth_rxq_harvest_buffer(adapter, true)))
+					break;
 				skb = new_skb;
 			} else {
-				ibmveth_rxq_harvest_buffer(adapter, false);
+				if (unlikely(ibmveth_rxq_harvest_buffer(adapter, false)))
+					break;
 				skb_reserve(skb, offset);
 			}
 
@@ -1407,7 +1481,10 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
 	 * then check once more to make sure we are done.
 	 */
 	lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE);
-	BUG_ON(lpar_rc != H_SUCCESS);
+	if (WARN_ON(lpar_rc != H_SUCCESS)) {
+		schedule_work(&adapter->work);
+		goto out;
+	}
 
 	if (ibmveth_rxq_pending_buffer(adapter) && napi_schedule(napi)) {
 		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
@@ -1428,7 +1505,7 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
 	if (napi_schedule_prep(&adapter->napi)) {
 		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
 				       VIO_IRQ_DISABLE);
-		BUG_ON(lpar_rc != H_SUCCESS);
+		WARN_ON(lpar_rc != H_SUCCESS);
 		__napi_schedule(&adapter->napi);
 	}
 	return IRQ_HANDLED;
@@ -1670,6 +1747,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 
 	adapter->vdev = dev;
 	adapter->netdev = netdev;
+	INIT_WORK(&adapter->work, ibmveth_reset);
 	adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
 	ibmveth_init_link_settings(netdev);
 
@@ -1762,6 +1840,8 @@ static void ibmveth_remove(struct vio_dev *dev)
 	struct ibmveth_adapter *adapter = netdev_priv(netdev);
 	int i;
 
+	cancel_work_sync(&adapter->work);
+
 	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
 		kobject_put(&adapter->rx_buff_pool[i].kobj);
 
* Unmerged path drivers/net/ethernet/ibm/ibmveth.h
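
The conflict itself is mechanical: the backport only needs the new
struct work_struct member added to the RHEL layout of the adapter
struct. A plausible resolution (a sketch of the merged layout, not the
final backport) would be:

    struct ibmveth_adapter {
            struct vio_dev *vdev;
            struct net_device *netdev;
            struct napi_struct napi;
            struct work_struct work;        /* drives ibmveth_reset() */
            unsigned int mcastFilterSize;
            /* ... remaining HEAD fields unchanged ... */
    };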