@@ -574,7 +574,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 	dma_addr_t dma_addr;
 	unsigned char *dst;
 	int shift = 0;
-	int index;
+	int bufidx;
 	int i;
 
 	if (!pool->active)
@@ -590,14 +590,14 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 	 * be 0.
 	 */
 	for (i = ind_bufp->index; i < count; ++i) {
-		index = pool->free_map[pool->next_free];
+		bufidx = pool->free_map[pool->next_free];
 
 		/* We maybe reusing the skb from earlier resets. Allocate
 		 * only if necessary. But since the LTB may have changed
 		 * during reset (see init_rx_pools()), update LTB below
 		 * even if reusing skb.
 		 */
-		skb = pool->rx_buff[index].skb;
+		skb = pool->rx_buff[bufidx].skb;
 		if (!skb) {
 			skb = netdev_alloc_skb(adapter->netdev,
 					       pool->buff_size);
@@ -612,24 +612,24 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 		pool->next_free = (pool->next_free + 1) % pool->size;
 
 		/* Copy the skb to the long term mapped DMA buffer */
-		offset = index * pool->buff_size;
+		offset = bufidx * pool->buff_size;
 		dst = pool->long_term_buff.buff + offset;
 		memset(dst, 0, pool->buff_size);
 		dma_addr = pool->long_term_buff.addr + offset;
 
 		/* add the skb to an rx_buff in the pool */
-		pool->rx_buff[index].data = dst;
-		pool->rx_buff[index].dma = dma_addr;
-		pool->rx_buff[index].skb = skb;
-		pool->rx_buff[index].pool_index = pool->index;
-		pool->rx_buff[index].size = pool->buff_size;
+		pool->rx_buff[bufidx].data = dst;
+		pool->rx_buff[bufidx].dma = dma_addr;
+		pool->rx_buff[bufidx].skb = skb;
+		pool->rx_buff[bufidx].pool_index = pool->index;
+		pool->rx_buff[bufidx].size = pool->buff_size;
 
 		/* queue the rx_buff for the next send_subcrq_indirect */
 		sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
 		memset(sub_crq, 0, sizeof(*sub_crq));
 		sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
 		sub_crq->rx_add.correlator =
-			cpu_to_be64((u64)&pool->rx_buff[index]);
+			cpu_to_be64((u64)&pool->rx_buff[bufidx]);
 		sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
 		sub_crq->rx_add.map_id = pool->long_term_buff.map_id;
@@ -671,10 +671,10 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 		sub_crq = &ind_bufp->indir_arr[i];
 		rx_buff = (struct ibmvnic_rx_buff *)
 			  be64_to_cpu(sub_crq->rx_add.correlator);
-		index = (int)(rx_buff - pool->rx_buff);
-		pool->free_map[pool->next_free] = index;
-		dev_kfree_skb_any(pool->rx_buff[index].skb);
-		pool->rx_buff[index].skb = NULL;
+		bufidx = (int)(rx_buff - pool->rx_buff);
+		pool->free_map[pool->next_free] = bufidx;
+		dev_kfree_skb_any(pool->rx_buff[bufidx].skb);
+		pool->rx_buff[bufidx].skb = NULL;
 	}
 	adapter->replenish_add_buff_failure += ind_bufp->index;
 	atomic_add(buffers_added, &pool->available);
@@ -2205,7 +2205,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	unsigned int offset;
 	int num_entries = 1;
 	unsigned char *dst;
-	int index = 0;
+	int bufidx = 0;
 	u8 proto = 0;
 
 	/* If a reset is in progress, drop the packet since
@@ -2239,9 +2239,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	else
 		tx_pool = &adapter->tx_pool[queue_num];
 
-	index = tx_pool->free_map[tx_pool->consumer_index];
+	bufidx = tx_pool->free_map[tx_pool->consumer_index];
 
-	if (index == IBMVNIC_INVALID_MAP) {
+	if (bufidx == IBMVNIC_INVALID_MAP) {
 		dev_kfree_skb_any(skb);
 		tx_send_failed++;
 		tx_dropped++;
@@ -2252,7 +2252,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
 
-	offset = index * tx_pool->buf_size;
+	offset = bufidx * tx_pool->buf_size;
 	dst = tx_pool->long_term_buff.buff + offset;
 	memset(dst, 0, tx_pool->buf_size);
 	data_dma_addr = tx_pool->long_term_buff.addr + offset;
@@ -2282,9 +2282,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	tx_pool->consumer_index =
 		(tx_pool->consumer_index + 1) % tx_pool->num_buffers;
 
-	tx_buff = &tx_pool->tx_buff[index];
+	tx_buff = &tx_pool->tx_buff[bufidx];
 	tx_buff->skb = skb;
-	tx_buff->index = index;
+	tx_buff->index = bufidx;
 	tx_buff->pool_index = queue_num;
 
 	memset(&tx_crq, 0, sizeof(tx_crq));
@@ -2296,9 +2296,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	if (skb_is_gso(skb))
 		tx_crq.v1.correlator =
-			cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
+			cpu_to_be32(bufidx | IBMVNIC_TSO_POOL_MASK);
 	else
-		tx_crq.v1.correlator = cpu_to_be32(index);
+		tx_crq.v1.correlator = cpu_to_be32(bufidx);
 	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
 	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
 	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
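
Why the rename helps: three different things were all called "index" in this code — pool->index (the pool's own id), ind_bufp->index (the indirect-buffer cursor), and the buffer-slot index from the free_map ring. The slot index is the one renamed to bufidx; it is handed out by free_map, travels to firmware as an opaque 64-bit correlator holding the address of &pool->rx_buff[bufidx], and is recovered on the error path by pointer subtraction. The following is a minimal standalone sketch of that round trip, using hypothetical stand-in types rather than the driver's own structures:

/* sketch.c - hypothetical stand-ins, not the ibmvnic structures */
#include <stdint.h>
#include <stdio.h>

#define POOL_SIZE 4

struct rx_buff {
	void *skb;			/* stands in for struct sk_buff * */
};

struct rx_pool {
	struct rx_buff rx_buff[POOL_SIZE];
	int free_map[POOL_SIZE];
	int next_free;
};

int main(void)
{
	struct rx_pool pool = { .free_map = { 0, 1, 2, 3 }, .next_free = 0 };

	/* hand out a slot from the free_map ring, as replenish_rx_pool() does */
	int bufidx = pool.free_map[pool.next_free];
	pool.next_free = (pool.next_free + 1) % POOL_SIZE;

	/* the slot's address is what gets stored in rx_add.correlator */
	uint64_t correlator = (uint64_t)(uintptr_t)&pool.rx_buff[bufidx];

	/* failure path: turn the correlator back into a slot index and
	 * return the slot to the free_map, mirroring the diff's cleanup loop
	 */
	struct rx_buff *rx_buff = (struct rx_buff *)(uintptr_t)correlator;
	int recovered = (int)(rx_buff - pool.rx_buff);
	pool.free_map[pool.next_free] = recovered;

	printf("handed out slot %d, recovered slot %d\n", bufidx, recovered);
	return 0;
}

The cast through uintptr_t is the userspace analogue of the driver's (u64) cast; the pointer subtraction works because rx_buff points into the pool.rx_buff array, which is the same invariant the driver relies on.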