@@ -547,6 +547,17 @@ static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
 		adapter->rx_pool[i].active = 0;
 }
 
+static void ibmvnic_set_safe_max_ind_descs(struct ibmvnic_adapter *adapter)
+{
+	if (adapter->cur_max_ind_descs > IBMVNIC_SAFE_IND_DESC) {
+		netdev_info(adapter->netdev,
+			    "set max ind descs from %u to safe limit %u\n",
+			    adapter->cur_max_ind_descs,
+			    IBMVNIC_SAFE_IND_DESC);
+		adapter->cur_max_ind_descs = IBMVNIC_SAFE_IND_DESC;
+	}
+}
+
 static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 			      struct ibmvnic_rx_pool *pool)
 {
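For reference, here is a standalone sketch of the fallback helper added above. The two limit values are assumptions for illustration (the header defining IBMVNIC_MAX_IND_DESCS and IBMVNIC_SAFE_IND_DESC is not part of this hunk); the point is that the clamp is one-way, only ever lowering the runtime limit.

#include <stdio.h>

#define IBMVNIC_MAX_IND_DESCS	128	/* assumed platform maximum */
#define IBMVNIC_SAFE_IND_DESC	16	/* assumed safe minimum */

struct adapter {
	unsigned int cur_max_ind_descs;
};

/* One-way clamp: only ever lowers the limit, never raises it. */
static void set_safe_max_ind_descs(struct adapter *a)
{
	if (a->cur_max_ind_descs > IBMVNIC_SAFE_IND_DESC) {
		printf("set max ind descs from %u to safe limit %u\n",
		       a->cur_max_ind_descs, IBMVNIC_SAFE_IND_DESC);
		a->cur_max_ind_descs = IBMVNIC_SAFE_IND_DESC;
	}
}

int main(void)
{
	struct adapter a = { .cur_max_ind_descs = IBMVNIC_MAX_IND_DESCS };

	set_safe_max_ind_descs(&a);	/* clamps 128 down to 16 */
	set_safe_max_ind_descs(&a);	/* second call is a no-op */
	return 0;
}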
@@ -633,7 +644,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 	sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
 
 	/* if send_subcrq_indirect queue is full, flush to VIOS */
-	if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
+	if (ind_bufp->index == adapter->cur_max_ind_descs ||
 	    i == count - 1) {
 		lpar_rc =
 			send_subcrq_indirect(adapter, handle,
@@ -652,6 +663,14 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 failure:
 	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
 		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
+
+	/* H_PARAMETER means the platform's descriptor limit was exceeded */
+	if (lpar_rc == H_PARAMETER)
+		ibmvnic_set_safe_max_ind_descs(adapter);
+
+	/* For all error cases, temporarily drop only this batch and
+	 * rely on TCP/IP retransmissions to retry and recover.
+	 */
 	for (i = ind_bufp->index - 1; i >= 0; --i) {
 		struct ibmvnic_rx_buff *rx_buff;
 
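The failure path above walks the partially built batch backwards and returns each buffer for reuse, so dropping the batch leaks nothing. A minimal sketch of that unwind pattern, with a pool layout invented purely for illustration:

#include <stdio.h>

#define BATCH 8

struct pool {
	int free_map[BATCH];	/* buffer indices available for reuse */
	int next_free;
};

/* Walk a partially submitted batch backwards, newest entry first,
 * handing every buffer back to the pool's free map.
 */
static void unwind_batch(struct pool *p, const int *batch, int index)
{
	for (int i = index - 1; i >= 0; --i)
		p->free_map[p->next_free++] = batch[i];
}

int main(void)
{
	struct pool p = { .next_free = 0 };
	int batch[BATCH] = { 4, 9, 2, 7 };

	unwind_batch(&p, batch, 4);	/* drop a 4-entry batch */
	printf("recovered %d buffers\n", p.next_free);
	return 0;
}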
@@ -2172,16 +2191,28 @@ static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
 		rc = send_subcrq_direct(adapter, handle,
 					(u64 *)ind_bufp->indir_arr);
 
-	if (rc)
+	if (rc) {
+		dev_err_ratelimited(&adapter->vdev->dev,
+				    "tx_flush failed, rc=%u (%llu entries dma=%pad handle=%llx)\n",
+				    rc, entries, &dma_addr, handle);
+		/* H_PARAMETER means the platform's descriptor limit was exceeded */
+		if (rc == H_PARAMETER)
+			ibmvnic_set_safe_max_ind_descs(adapter);
+
+		/* For all error cases, temporarily drop only this batch and
+		 * rely on TCP/IP retransmissions to retry and recover.
+		 */
 		ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
-	else
+	} else {
 		ind_bufp->index = 0;
+	}
 	return rc;
 }
 
 static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+	u32 cur_max_ind_descs = adapter->cur_max_ind_descs;
 	int queue_num = skb_get_queue_mapping(skb);
 	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
 	struct device *dev = &adapter->vdev->dev;
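A rough model of the new flush contract: the hcall is simulated by a stub that rejects oversized batches the way an older hypervisor would, H_PARAMETER triggers the one-way clamp, and the batch is dropped either way so TCP/IP retransmissions can recover. All names and numeric values here are assumptions, not the driver's.

#include <stdio.h>

/* Hypothetical stand-ins for the PAPR hcall status values. */
enum { H_SUCCESS = 0, H_PARAMETER = -4 };

#define SAFE_LIMIT 16	/* assumed safe minimum */

struct ind_bufp { int index; };
struct adapter  { unsigned int cur_max_ind_descs; };

/* Stub hcall: reject batches larger than the platform's limit. */
static int send_stub(int entries)
{
	return entries > SAFE_LIMIT ? H_PARAMETER : H_SUCCESS;
}

static int tx_flush(struct adapter *a, struct ind_bufp *bufp)
{
	int rc = send_stub(bufp->index);

	if (rc) {
		fprintf(stderr, "tx_flush failed, rc=%d (%d entries)\n",
			rc, bufp->index);
		if (rc == H_PARAMETER && a->cur_max_ind_descs > SAFE_LIMIT)
			a->cur_max_ind_descs = SAFE_LIMIT;	/* fall back */
	}
	/* drop or finish the batch either way; TCP/IP retransmits on loss */
	bufp->index = 0;
	return rc;
}

int main(void)
{
	struct adapter a = { .cur_max_ind_descs = 128 };
	struct ind_bufp bufp = { .index = 100 };

	tx_flush(&a, &bufp);	/* fails once and clamps the limit */
	printf("new limit: %u\n", a.cur_max_ind_descs);
	return 0;
}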
@@ -2379,7 +2410,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	tx_crq.v1.n_crq_elem = num_entries;
 	tx_buff->num_entries = num_entries;
 	/* flush buffer if current entry can not fit */
-	if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
+	if (num_entries + ind_bufp->index > cur_max_ind_descs) {
 		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
 		if (lpar_rc != H_SUCCESS)
 			goto tx_flush_err;
@@ -2392,7 +2423,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	ind_bufp->index += num_entries;
 	if (__netdev_tx_sent_queue(txq, skb->len,
 				   netdev_xmit_more() &&
-				   ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
+				   ind_bufp->index < cur_max_ind_descs)) {
 		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
 		if (lpar_rc != H_SUCCESS)
 			goto tx_err;
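The two hunks above are why cur_max_ind_descs is snapshotted at the top of ibmvnic_xmit(): descriptors accumulate in the indirect buffer and are flushed once the next packet would overflow the current (possibly clamped) limit. A minimal userspace model of that batching, with assumed numbers:

#include <stdio.h>

static unsigned int cur_max_ind_descs = 128;	/* assumed default limit */
static unsigned int queued;			/* descriptors queued so far */

static void flush(const char *why)
{
	printf("flush %u entries (%s)\n", queued, why);
	queued = 0;
}

static void queue_packet(unsigned int num_entries)
{
	/* flush first if this packet's descriptors cannot fit */
	if (queued + num_entries > cur_max_ind_descs)
		flush("full");
	queued += num_entries;
}

int main(void)
{
	for (int i = 0; i < 100; i++)
		queue_packet(3);	/* say each skb needs 3 descriptors */
	flush("final");
	return 0;
}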
@@ -3865,7 +3896,7 @@ static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
 	}
 
 	dma_free_coherent(dev,
-			  IBMVNIC_IND_ARR_SZ,
+			  IBMVNIC_IND_MAX_ARR_SZ,
 			  scrq->ind_buf.indir_arr,
 			  scrq->ind_buf.indir_dma);
 
@@ -3922,7 +3953,7 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
 
 	scrq->ind_buf.indir_arr =
 		dma_alloc_coherent(dev,
-				   IBMVNIC_IND_ARR_SZ,
+				   IBMVNIC_IND_MAX_ARR_SZ,
 				   &scrq->ind_buf.indir_dma,
 				   GFP_KERNEL);
 
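Renaming the constant to IBMVNIC_IND_MAX_ARR_SZ in both the alloc and free paths reflects a design choice: the coherent DMA array is always sized for the maximum descriptor count, so the runtime cap can move up or down (LPM, H_PARAMETER fallback) without reallocating DMA memory. A toy computation under an assumed 32-byte descriptor size:

#include <stdio.h>

#define IND_DESC_SZ	32u	/* assumed bytes per indirect descriptor */
#define MAX_IND_DESCS	128u
#define IND_MAX_ARR_SZ	(MAX_IND_DESCS * IND_DESC_SZ)

int main(void)
{
	/* one fixed allocation covers every possible runtime cap */
	printf("indirect array: %u bytes for up to %u descriptors\n",
	       IND_MAX_ARR_SZ, MAX_IND_DESCS);
	return 0;
}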
@@ -6227,6 +6258,19 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
 			rc = reset_sub_crq_queues(adapter);
 		}
 	} else {
+		if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
+			/* After an LPM, reset the max number of indirect
+			 * subcrq descriptors per H_SEND_SUB_CRQ_INDIRECT
+			 * hcall to the default max (e.g. POWER8 -> POWER10).
+			 *
+			 * If the new destination platform does not support
+			 * the higher limit (e.g. POWER10 -> POWER8 LPM),
+			 * H_PARAMETER will trigger automatic fallback to
+			 * the safe minimum limit.
+			 */
+			adapter->cur_max_ind_descs = IBMVNIC_MAX_IND_DESCS;
+		}
+
 		rc = init_sub_crqs(adapter);
 	}
 
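Putting the mobility hunk together with the fallback helper gives the limit a simple lifecycle: clamp down on H_PARAMETER, reset to the default max after a migration, and clamp again if the destination platform rejects it. A sketch of that lifecycle under the same assumed 128/16 limits as above:

#include <stdio.h>

#define MAX_IND_DESCS	128	/* assumed default maximum */
#define SAFE_IND_DESC	16	/* assumed safe minimum */

static unsigned int cur_max = MAX_IND_DESCS;

/* LPM (VNIC_RESET_MOBILITY): retry the higher limit on the new platform */
static void on_mobility_reset(void)
{
	cur_max = MAX_IND_DESCS;
}

/* H_PARAMETER from the hypervisor: the platform rejected the batch size */
static void on_h_parameter(void)
{
	if (cur_max > SAFE_IND_DESC)
		cur_max = SAFE_IND_DESC;
}

int main(void)
{
	on_h_parameter();	/* e.g. running on an older platform */
	printf("before LPM: %u\n", cur_max);	/* 16 */
	on_mobility_reset();	/* migrated: probe the max again */
	printf("after LPM:  %u\n", cur_max);	/* 128 */
	return 0;
}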
@@ -6378,6 +6422,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 
 	adapter->wait_for_reset = false;
 	adapter->last_reset_time = jiffies;
+	adapter->cur_max_ind_descs = IBMVNIC_MAX_IND_DESCS;
 
 	rc = register_netdev(netdev);
 	if (rc) {