@@ -2129,65 +2129,102 @@ static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)
 	return ok;
 }
 
-static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer,
-				  struct xdp_frame *xdpf,
-				  struct igc_ring *ring)
-{
-	dma_addr_t dma;
-
-	dma = dma_map_single(ring->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
-	if (dma_mapping_error(ring->dev, dma)) {
-		netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
-		return -ENOMEM;
-	}
-
-	buffer->type = IGC_TX_BUFFER_TYPE_XDP;
-	buffer->xdpf = xdpf;
-	buffer->protocol = 0;
-	buffer->bytecount = xdpf->len;
-	buffer->gso_segs = 1;
-	buffer->time_stamp = jiffies;
-	dma_unmap_len_set(buffer, len, xdpf->len);
-	dma_unmap_addr_set(buffer, dma, dma);
-	return 0;
-}
-
 /* This function requires __netif_tx_lock is held by the caller. */
 static int igc_xdp_init_tx_descriptor(struct igc_ring *ring,
 				      struct xdp_frame *xdpf)
 {
-	struct igc_tx_buffer *buffer;
-	union igc_adv_tx_desc *desc;
-	u32 cmd_type, olinfo_status;
-	int err;
+	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+	u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+	u16 count, index = ring->next_to_use;
+	struct igc_tx_buffer *head = &ring->tx_buffer_info[index];
+	struct igc_tx_buffer *buffer = head;
+	union igc_adv_tx_desc *desc = IGC_TX_DESC(ring, index);
+	u32 olinfo_status, len = xdpf->len, cmd_type;
+	void *data = xdpf->data;
+	u16 i;
 
-	if (!igc_desc_unused(ring))
-		return -EBUSY;
+	count = TXD_USE_COUNT(len);
+	for (i = 0; i < nr_frags; i++)
+		count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i]));
 
-	buffer = &ring->tx_buffer_info[ring->next_to_use];
-	err = igc_xdp_init_tx_buffer(buffer, xdpf, ring);
-	if (err)
-		return err;
+	if (igc_maybe_stop_tx(ring, count + 3)) {
+		/* this is a hard error */
+		return -EBUSY;
+	}
 
-	cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
-		   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
-		   buffer->bytecount;
-	olinfo_status = buffer->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
+	i = 0;
+	head->bytecount = xdp_get_frame_len(xdpf);
+	head->type = IGC_TX_BUFFER_TYPE_XDP;
+	head->gso_segs = 1;
+	head->xdpf = xdpf;
 
-	desc = IGC_TX_DESC(ring, ring->next_to_use);
-	desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+	olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
 	desc->read.olinfo_status = cpu_to_le32(olinfo_status);
-	desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma));
 
-	netdev_tx_sent_queue(txring_txq(ring), buffer->bytecount);
+	for (;;) {
+		dma_addr_t dma;
+
+		dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE);
+		if (dma_mapping_error(ring->dev, dma)) {
+			netdev_err_once(ring->netdev,
+					"Failed to map DMA for TX\n");
+			goto unmap;
+		}
+
+		dma_unmap_len_set(buffer, len, len);
+		dma_unmap_addr_set(buffer, dma, dma);
 
-	buffer->next_to_watch = desc;
+		cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+			   IGC_ADVTXD_DCMD_IFCS | len;
 
-	ring->next_to_use++;
-	if (ring->next_to_use == ring->count)
-		ring->next_to_use = 0;
+		desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+		desc->read.buffer_addr = cpu_to_le64(dma);
+
+		buffer->protocol = 0;
+
+		if (++index == ring->count)
+			index = 0;
+
+		if (i == nr_frags)
+			break;
+
+		buffer = &ring->tx_buffer_info[index];
+		desc = IGC_TX_DESC(ring, index);
+		desc->read.olinfo_status = 0;
+
+		data = skb_frag_address(&sinfo->frags[i]);
+		len = skb_frag_size(&sinfo->frags[i]);
+		i++;
+	}
+	desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD);
+
+	netdev_tx_sent_queue(txring_txq(ring), head->bytecount);
+	/* set the timestamp */
+	head->time_stamp = jiffies;
+	/* set next_to_watch value indicating a packet is present */
+	head->next_to_watch = desc;
+	ring->next_to_use = index;
 
 	return 0;
+
+unmap:
+	for (;;) {
+		buffer = &ring->tx_buffer_info[index];
+		if (dma_unmap_len(buffer, len))
+			dma_unmap_page(ring->dev,
+				       dma_unmap_addr(buffer, dma),
+				       dma_unmap_len(buffer, len),
+				       DMA_TO_DEVICE);
+		dma_unmap_len_set(buffer, len, 0);
+		if (buffer == head)
+			break;
+
+		if (!index)
+			index += ring->count;
+		index--;
+	}
+
+	return -ENOMEM;
 }
 
 static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
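
The reworked igc_xdp_init_tx_descriptor() sizes its ring reservation up front: one descriptor per 32 KiB chunk of the linear part plus each frag, checked via igc_maybe_stop_tx() before any DMA mapping, with the unmap: path rolling every mapping back on failure. Below is a minimal standalone sketch of that descriptor-count math, assuming igc.h's definitions TXD_USE_COUNT(S) = DIV_ROUND_UP(S, IGC_MAX_DATA_PER_TXD) and IGC_MAX_DATA_PER_TXD = BIT(15); the frame lengths and the reading of the "+ 3" headroom are illustrative assumptions, not taken from this patch.

```c
#include <stdio.h>

/* Stand-ins for the igc.h macros (assumed values, verify against igc.h). */
#define MAX_DATA_PER_TXD	(1u << 15)	/* 32 KiB max per data descriptor */
#define TXD_USE_COUNT(S)	(((S) + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD)

int main(void)
{
	unsigned int head_len = 1500;			/* linear part of the xdp_frame */
	unsigned int frag_len[] = { 4096, 40000 };	/* two hypothetical frags */
	unsigned int count = TXD_USE_COUNT(head_len);	/* -> 1 */
	unsigned int i;

	for (i = 0; i < 2; i++)
		count += TXD_USE_COUNT(frag_len[i]);	/* -> +1, +2 */

	/* The driver asks igc_maybe_stop_tx() for count + 3 slots; treating
	 * the extra slots as headroom (e.g. for a context descriptor) is an
	 * assumption on our part. */
	printf("descriptors: %u, reserved: %u\n", count, count + 3);
	return 0;
}
```
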
@@ -2369,6 +2406,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
 		xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring),
 				 igc_rx_offset(rx_ring) + pkt_offset,
 				 size, true);
+		xdp_buff_clear_frags_flag(&xdp);
 
 		skb = igc_xdp_run_prog(adapter, &xdp);
 	}
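
Clearing the frags flag here is defensive: igc builds this xdp_buff over a single Rx buffer, and since the xdp_buff is reused from frame to frame, a stale XDP_FLAGS_HAS_FRAGS bit left over from earlier use could otherwise mislead frag-aware helpers. For reference, the core helper is just a flag clear, roughly as below (paraphrased from include/net/xdp.h; verify against the tree this patch targets).

```c
/* Approximate shape of the helper from include/net/xdp.h. */
static __always_inline void xdp_buff_clear_frags_flag(struct xdp_buff *xdp)
{
	xdp->flags &= ~XDP_FLAGS_HAS_FRAGS;	/* treat the buff as linear */
}
```
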
@@ -4644,10 +4682,10 @@ void igc_update_stats(struct igc_adapter *adapter)
 		}
 
 		do {
-			start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
+			start = u64_stats_fetch_begin(&ring->rx_syncp);
 			_bytes = ring->rx_stats.bytes;
 			_packets = ring->rx_stats.packets;
-		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
+		} while (u64_stats_fetch_retry(&ring->rx_syncp, start));
 		bytes += _bytes;
 		packets += _packets;
 	}
@@ -4661,10 +4699,10 @@ void igc_update_stats(struct igc_adapter *adapter)
 		struct igc_ring *ring = adapter->tx_ring[i];
 
 		do {
-			start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
+			start = u64_stats_fetch_begin(&ring->tx_syncp);
 			_bytes = ring->tx_stats.bytes;
 			_packets = ring->tx_stats.packets;
-		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
+		} while (u64_stats_fetch_retry(&ring->tx_syncp, start));
 		bytes += _bytes;
 		packets += _packets;
 	}
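
Both stats loops use the u64_stats seqcount pattern: the reader snapshots a sequence number, copies the counters, and retries if a writer raced with it (the _irq suffix is gone because the API collapsed the variants into one). Below is a userspace sketch of the same retry discipline; the real kernel helpers additionally issue the required memory barriers and compile down to plain loads on 64-bit, which this toy version does not model.

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct ring_stats {
	atomic_uint seq;		/* odd while a writer is mid-update */
	uint64_t bytes, packets;
};

static unsigned int fetch_begin(struct ring_stats *s)
{
	unsigned int seq;

	while ((seq = atomic_load(&s->seq)) & 1)
		;			/* writer active: wait for it */
	return seq;
}

static int fetch_retry(struct ring_stats *s, unsigned int seq)
{
	return atomic_load(&s->seq) != seq;	/* a write slipped in: retry */
}

int main(void)
{
	struct ring_stats s = { .bytes = 1500, .packets = 1 };
	uint64_t bytes, packets;
	unsigned int start;

	do {
		start = fetch_begin(&s);
		bytes = s.bytes;	/* consistent snapshot of both fields */
		packets = s.packets;
	} while (fetch_retry(&s, start));

	printf("%llu bytes, %llu packets\n",
	       (unsigned long long)bytes, (unsigned long long)packets);
	return 0;
}
```
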
@@ -5343,6 +5381,13 @@ static void igc_watchdog_task(struct work_struct *work)
 				break;
 			}
 
+			/* Once the launch time has been set on the wire, there
+			 * is a delay before the link speed can be determined
+			 * based on link-up activity. Write into the register
+			 * as soon as we know the correct link speed.
+			 */
+			igc_tsn_adjust_txtime_offset(adapter);
+
 			if (adapter->link_speed != SPEED_1000)
 				goto no_wait;
 
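
igc_tsn_adjust_txtime_offset() itself lives in igc_tsn.c and is not part of this hunk. A plausible shape is sketched below, assuming it maps the freshly learned link speed to a launch-time offset and writes it into a GTXOFFSET-style register; the register name and the per-speed constants are assumptions here, so consult igc_tsn.c in the matching tree for the authoritative version.

```c
/* Hypothetical sketch, not the patch's actual implementation. */
static void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u16 txoffset;

	switch (adapter->link_speed) {
	case SPEED_10:
		txoffset = IGC_TXOFFSET_SPEED_10;
		break;
	case SPEED_100:
		txoffset = IGC_TXOFFSET_SPEED_100;
		break;
	case SPEED_1000:
		txoffset = IGC_TXOFFSET_SPEED_1000;
		break;
	case SPEED_2500:
		txoffset = IGC_TXOFFSET_SPEED_2500;
		break;
	default:
		txoffset = 0;
		break;
	}

	wr32(IGC_GTXOFFSET, txoffset);	/* program the launch-time offset */
}
```
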
@@ -5812,9 +5857,10 @@ static bool validate_schedule(struct igc_adapter *adapter,
 		return false;
 
 	for (n = 0; n < qopt->num_entries; n++) {
-		const struct tc_taprio_sched_entry *e;
+		const struct tc_taprio_sched_entry *e, *prev;
 		int i;
 
+		prev = n ? &qopt->entries[n - 1] : NULL;
 		e = &qopt->entries[n];
 
 		/* i225 only supports "global" frame preemption
@@ -5827,7 +5873,12 @@ static bool validate_schedule(struct igc_adapter *adapter,
 			if (e->gate_mask & BIT(i))
 				queue_uses[i]++;
 
-			if (queue_uses[i] > 1)
+			/* There are limitations: A single queue cannot be
+			 * opened and closed multiple times per cycle unless the
+			 * gate stays open. Check for it.
+			 */
+			if (queue_uses[i] > 1 &&
+			    !(prev->gate_mask & BIT(i)))
 				return false;
 		}
 	}
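
The relaxed check accepts a gate held open across consecutive entries while still rejecting a close-then-reopen within one cycle, which the i225 cannot express. (Note that prev is never dereferenced with n == 0, since queue_uses[i] cannot exceed 1 on the first entry.) A standalone harness, hypothetical and not driver code, makes the accepted and rejected shapes concrete:

```c
#include <stdbool.h>
#include <stdio.h>

/* Returns true iff no queue's gate closes and later reopens in the cycle. */
static bool schedule_ok(const unsigned int *gate_mask, int n_entries,
			int n_queues)
{
	for (int q = 0; q < n_queues; q++) {
		int uses = 0;

		for (int n = 0; n < n_entries; n++) {
			if (!(gate_mask[n] & (1u << q)))
				continue;
			uses++;
			/* A second use is legal only if the previous
			 * entry kept this gate open. */
			if (uses > 1 && !(gate_mask[n - 1] & (1u << q)))
				return false;
		}
	}
	return true;
}

int main(void)
{
	unsigned int held_open[] = { 0x1, 0x3, 0x2 };	/* queue 0 spans entries 0-1: OK */
	unsigned int reopened[]  = { 0x1, 0x2, 0x1 };	/* queue 0 closes, reopens: rejected */

	printf("held_open: %d, reopened: %d\n",
	       schedule_ok(held_open, 3, 2), schedule_ok(reopened, 3, 2));
	return 0;
}
```
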
@@ -5871,6 +5922,7 @@ static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
 static int igc_save_qbv_schedule(struct igc_adapter *adapter,
 				 struct tc_taprio_qopt_offload *qopt)
 {
+	bool queue_configured[IGC_MAX_TX_QUEUES] = { };
 	u32 start_time = 0, end_time = 0;
 	size_t n;
 
@@ -5886,9 +5938,6 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
 	adapter->cycle_time = qopt->cycle_time;
 	adapter->base_time = qopt->base_time;
 
-	/* FIXME: be a little smarter about cases when the gate for a
-	 * queue stays open for more than one entry.
-	 */
 	for (n = 0; n < qopt->num_entries; n++) {
 		struct tc_taprio_sched_entry *e = &qopt->entries[n];
 		int i;
@@ -5901,8 +5950,15 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
 			if (!(e->gate_mask & BIT(i)))
 				continue;
 
-			ring->start_time = start_time;
+			/* Check whether a queue stays open for more than one
+			 * entry. If so, keep the start and advance the end
+			 * time.
+			 */
+			if (!queue_configured[i])
+				ring->start_time = start_time;
 			ring->end_time = end_time;
+
+			queue_configured[i] = true;
 		}
 
 		start_time += e->interval;
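
With queue_configured[] in place, an entry that keeps a gate open extends the queue's window rather than restarting it: the first open entry pins start_time, and every later consecutive open entry only advances end_time. A worked standalone rendering of that loop, assuming three entries with 100/200/300 microsecond intervals, two queues, and that end_time is advanced before the per-queue pass as in the surrounding function:

```c
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int gate_mask[] = { 0x1, 0x1, 0x2 };	/* open gates per entry */
	unsigned int interval[]  = { 100, 200, 300 };	/* entry lengths in us */
	unsigned int start[2] = { 0 }, end[2] = { 0 };
	bool configured[2] = { false };
	unsigned int start_time = 0, end_time = 0;

	for (int n = 0; n < 3; n++) {
		end_time += interval[n];
		for (int q = 0; q < 2; q++) {
			if (!(gate_mask[n] & (1u << q)))
				continue;
			if (!configured[q])	/* first open entry: pin start */
				start[q] = start_time;
			end[q] = end_time;	/* every open entry: grow end */
			configured[q] = true;
		}
		start_time += interval[n];
	}

	/* queue 0 gets one merged window [0, 300); queue 1 gets [300, 600) */
	for (int q = 0; q < 2; q++)
		printf("queue %d: start=%u end=%u\n", q, start[q], end[q]);
	return 0;
}
```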