@@ -752,6 +752,17 @@ static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
 		adapter->rx_pool[i].active = 0;
 }
 
+static void ibmvnic_set_safe_max_ind_descs(struct ibmvnic_adapter *adapter)
+{
+	if (adapter->cur_max_ind_descs > IBMVNIC_SAFE_IND_DESC) {
+		netdev_info(adapter->netdev,
+			    "set max ind descs from %u to safe limit %u\n",
+			    adapter->cur_max_ind_descs,
+			    IBMVNIC_SAFE_IND_DESC);
+		adapter->cur_max_ind_descs = IBMVNIC_SAFE_IND_DESC;
+	}
+}
+
 static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 			      struct ibmvnic_rx_pool *pool)
 {
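
The new helper clamps adapter->cur_max_ind_descs to a conservative floor the first time the platform rejects a larger batch. For context, it leans on companion definitions in ibmvnic.h roughly like the sketch below; the names IBMVNIC_SAFE_IND_DESC and IBMVNIC_IND_MAX_ARR_SZ come from this patch, while the concrete values (128 descriptors, 32 bytes each, a safe floor of 16) are assumptions for illustration only:

	/* Sketch of the assumed ibmvnic.h companions; values are illustrative. */
	#define IBMVNIC_MAX_IND_DESCS	128	/* default per-hcall batch limit */
	#define IBMVNIC_SAFE_IND_DESC	16	/* floor every supported platform accepts */
	#define IBMVNIC_IND_MAX_ARR_SZ	(IBMVNIC_MAX_IND_DESCS * 32)	/* buffer bytes, 32 B/desc */
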
@@ -839,7 +850,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 		sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
 
 		/* if send_subcrq_indirect queue is full, flush to VIOS */
-		if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
+		if (ind_bufp->index == adapter->cur_max_ind_descs ||
 		    i == count - 1) {
 			lpar_rc =
 				send_subcrq_indirect(adapter, handle,
@@ -858,6 +869,14 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 failure:
 	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
 		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
+
+	/* Detect platform limit H_PARAMETER */
+	if (lpar_rc == H_PARAMETER)
+		ibmvnic_set_safe_max_ind_descs(adapter);
+
+	/* For all error cases, temporarily drop only this batch.
+	 * Rely on TCP/IP retransmissions to retry and recover.
+	 */
 	for (i = ind_bufp->index - 1; i >= 0; --i) {
 		struct ibmvnic_rx_buff *rx_buff;
 
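
Taken together, the rx failure path turns a platform rejection into a one-time clamp: the batch that triggered H_PARAMETER is unwound and dropped, the limit shrinks, and subsequent replenish cycles stay under it. A minimal sketch of that sequence, with the 128/16 values assumed as above and the wrapper call abbreviated:

	/* Minimal sketch of the recovery sequence (values assumed) */
	lpar_rc = send_subcrq_indirect(adapter, handle,
				       (u64)ind_bufp->indir_dma,
				       (u64)ind_bufp->index);
	if (lpar_rc == H_PARAMETER)
		ibmvnic_set_safe_max_ind_descs(adapter);	/* e.g. 128 -> 16, logged once */
	/* the current batch is dropped; TCP retransmission refills the pool */
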
@@ -2377,16 +2396,28 @@ static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
 	rc = send_subcrq_direct(adapter, handle,
 				(u64 *)ind_bufp->indir_arr);
 
-	if (rc)
+	if (rc) {
+		dev_err_ratelimited(&adapter->vdev->dev,
+				    "tx_flush failed, rc=%u (%llu entries dma=%pad handle=%llx)\n",
+				    rc, entries, &dma_addr, handle);
+		/* Detect platform limit H_PARAMETER */
+		if (rc == H_PARAMETER)
+			ibmvnic_set_safe_max_ind_descs(adapter);
+
+		/* For all error cases, temporarily drop only this batch.
+		 * Rely on TCP/IP retransmissions to retry and recover.
+		 */
 		ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
-	else
+	} else {
 		ind_bufp->index = 0;
+	}
 	return rc;
 }
 
 static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+	u32 cur_max_ind_descs = adapter->cur_max_ind_descs;
 	int queue_num = skb_get_queue_mapping(skb);
 	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
 	struct device *dev = &adapter->vdev->dev;
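
ibmvnic_xmit() snapshots the limit into a local once per packet. A plausible reading (an inference about intent, not stated in the patch) is that this keeps the function's two threshold checks mutually consistent even if a flush failure on another queue clamps adapter->cur_max_ind_descs mid-transmit:

	/* Sketch (assumption about intent): snapshot once so both threshold
	 * checks below compare against the same value, even if a concurrent
	 * flush failure shrinks adapter->cur_max_ind_descs meanwhile.
	 * READ_ONCE() here is a hypothetical hardening, not what the patch uses.
	 */
	u32 cur_max_ind_descs = READ_ONCE(adapter->cur_max_ind_descs);
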
@@ -2586,7 +2617,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	tx_crq.v1.n_crq_elem = num_entries;
 	tx_buff->num_entries = num_entries;
 	/* flush buffer if current entry can not fit */
-	if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
+	if (num_entries + ind_bufp->index > cur_max_ind_descs) {
 		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
 		if (lpar_rc != H_SUCCESS)
 			goto tx_flush_err;
@@ -2599,7 +2630,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	ind_bufp->index += num_entries;
 	if (__netdev_tx_sent_queue(txq, skb->len,
 				   netdev_xmit_more() &&
-				   ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
+				   ind_bufp->index < cur_max_ind_descs)) {
 		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
 		if (lpar_rc != H_SUCCESS)
 			goto tx_err;
@@ -4013,7 +4044,7 @@ static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
 	}
 
 	dma_free_coherent(dev,
-			  IBMVNIC_IND_ARR_SZ,
+			  IBMVNIC_IND_MAX_ARR_SZ,
 			  scrq->ind_buf.indir_arr,
 			  scrq->ind_buf.indir_dma);
 
@@ -4070,7 +4101,7 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
 
 	scrq->ind_buf.indir_arr =
 		dma_alloc_coherent(dev,
-				   IBMVNIC_IND_MAX_ARR_SZ,
 				   &scrq->ind_buf.indir_dma,
 				   GFP_KERNEL);
 
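
Note that both the free and the alloc now use the renamed maximum size rather than anything derived from cur_max_ind_descs. The apparent rationale (an inference, not stated in the diff) is that sizing the DMA buffer for the worst case lets the limit be clamped or restored at runtime without ever reallocating indir_arr. A compile-time guard along these lines would capture that invariant, assuming descriptors are the driver's union sub_crq:

	/* Sketch: the indirect buffer must always hold a maximum-size batch,
	 * so clamping cur_max_ind_descs never requires reallocation.
	 */
	BUILD_BUG_ON(IBMVNIC_IND_MAX_ARR_SZ <
		     IBMVNIC_MAX_IND_DESCS * sizeof(union sub_crq));
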
@@ -6376,6 +6407,19 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
 			rc = reset_sub_crq_queues(adapter);
 		}
 	} else {
+		if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
+			/* After an LPM, reset the max number of indirect
+			 * subcrq descriptors per H_SEND_SUB_CRQ_INDIRECT
+			 * hcall to the default max (e.g. POWER8 -> POWER10).
+			 *
+			 * If the new destination platform does not support
+			 * the higher limit (e.g. POWER10 -> POWER8 LPM),
+			 * H_PARAMETER will trigger automatic fallback to the
+			 * safe minimum limit.
+			 */
+			adapter->cur_max_ind_descs = IBMVNIC_MAX_IND_DESCS;
+		}
+
 		rc = init_sub_crqs(adapter);
 	}
 
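
This mobility hook and the probe-time initialization in the final hunk bracket the limit's lifecycle. A worked scenario, with the 128/16 values assumed as before:

	/* Illustrative lifecycle (values assumed):
	 *   probe():               cur_max_ind_descs = IBMVNIC_MAX_IND_DESCS (128)
	 *   LPM to older platform: reset to 128; first indirect hcall returns
	 *                          H_PARAMETER -> clamped to IBMVNIC_SAFE_IND_DESC (16)
	 *   LPM back to newer:     VNIC_RESET_MOBILITY restores 128; hcalls
	 *                          succeed and full batching resumes
	 */
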
@@ -6527,6 +6571,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 
 	adapter->wait_for_reset = false;
 	adapter->last_reset_time = jiffies;
+	adapter->cur_max_ind_descs = IBMVNIC_MAX_IND_DESCS;
 
 	rc = register_netdev(netdev);
 	if (rc) {