+use std::collections::VecDeque;
+
+use rustc_data_structures::captures::Captures;
+use rustc_data_structures::fx::FxHashSet;
 use rustc_middle::mir;
 use rustc_span::Span;
 
 use crate::coverage::graph::{BasicCoverageBlock, CoverageGraph};
 use crate::coverage::mappings;
-use crate::coverage::spans::from_mir::SpanFromMir;
+use crate::coverage::spans::from_mir::{
+    extract_covspans_and_holes_from_mir, ExtractedCovspans, Hole, SpanFromMir,
+};
 use crate::coverage::ExtractedHirInfo;
 
 mod from_mir;
@@ -19,50 +25,181 @@ pub(super) fn extract_refined_covspans(
     basic_coverage_blocks: &CoverageGraph,
     code_mappings: &mut impl Extend<mappings::CodeMapping>,
 ) {
-    let sorted_span_buckets =
-        from_mir::mir_to_initial_sorted_coverage_spans(mir_body, hir_info, basic_coverage_blocks);
-    for bucket in sorted_span_buckets {
-        let refined_spans = refine_sorted_spans(bucket);
-        code_mappings.extend(refined_spans.into_iter().map(|RefinedCovspan { span, bcb }| {
+    let ExtractedCovspans { mut covspans, mut holes } =
+        extract_covspans_and_holes_from_mir(mir_body, hir_info, basic_coverage_blocks);
+
+    // First, perform the passes that need macro information.
+    covspans.sort_by(|a, b| basic_coverage_blocks.cmp_in_dominator_order(a.bcb, b.bcb));
+    remove_unwanted_macro_spans(&mut covspans);
+    split_visible_macro_spans(&mut covspans);
+
+    // We no longer need the extra information in `SpanFromMir`, so convert to `Covspan`.
+    let mut covspans = covspans.into_iter().map(SpanFromMir::into_covspan).collect::<Vec<_>>();
+
+    let compare_covspans = |a: &Covspan, b: &Covspan| {
+        compare_spans(a.span, b.span)
+            // After deduplication, we want to keep only the most-dominated BCB.
+            .then_with(|| basic_coverage_blocks.cmp_in_dominator_order(a.bcb, b.bcb).reverse())
+    };
+    covspans.sort_by(compare_covspans);
+
+    // Among covspans with the same span, keep only one,
+    // preferring the one with the most-dominated BCB.
+    // (Ideally we should try to preserve _all_ non-dominating BCBs, but that
+    // requires a lot more complexity in the span refiner, for little benefit.)
+    covspans.dedup_by(|b, a| a.span.source_equal(b.span));
+
+    // Sort the holes, and merge overlapping/adjacent holes.
+    holes.sort_by(|a, b| compare_spans(a.span, b.span));
+    holes.dedup_by(|b, a| a.merge_if_overlapping_or_adjacent(b));
+
+    // Split the covspans into separate buckets that don't overlap any holes.
+    let buckets = divide_spans_into_buckets(covspans, &holes);
+
+    for mut covspans in buckets {
+        // Make sure each individual bucket is internally sorted.
+        covspans.sort_by(compare_covspans);
+        let _span = debug_span!("processing bucket", ?covspans).entered();
+
+        let mut covspans = remove_unwanted_overlapping_spans(covspans);
+        debug!(?covspans, "after removing overlaps");
+
+        // Do one last merge pass, to simplify the output.
+        covspans.dedup_by(|b, a| a.merge_if_eligible(b));
+        debug!(?covspans, "after merge");
+
+        code_mappings.extend(covspans.into_iter().map(|Covspan { span, bcb }| {
             // Each span produced by the refiner represents an ordinary code region.
             mappings::CodeMapping { span, bcb }
         }));
     }
 }
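
The `dedup_by` calls above lean on a detail of `Vec::dedup_by`: the closure is handed the *later* element first and the retained earlier element second, and returning `true` drops the later one. A minimal standalone sketch of that merge-while-deduplicating pattern, using plain `(u32, u32)` ranges rather than the compiler's span types:

fn main() {
    // Ranges sorted by start; fold each later range into the earlier one it touches.
    let mut ranges: Vec<(u32, u32)> = vec![(0, 5), (5, 9), (12, 14)];
    ranges.dedup_by(|later, earlier| {
        if later.0 <= earlier.1 {
            // Grow the retained (earlier) element, then let `dedup_by` drop the later one.
            earlier.1 = earlier.1.max(later.1);
            true
        } else {
            false
        }
    });
    assert_eq!(ranges, vec![(0, 9), (12, 14)]);
}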
 
-#[derive(Debug)]
-struct RefinedCovspan {
-    span: Span,
-    bcb: BasicCoverageBlock,
+/// Macros that expand into branches (e.g. `assert!`, `trace!`) tend to generate
+/// multiple condition/consequent blocks that have the span of the whole macro
+/// invocation, which is unhelpful. Keeping only the first such span seems to
+/// give better mappings, so remove the others.
+///
+/// (The input spans should be sorted in BCB dominator order, so that the
+/// retained "first" span is likely to dominate the others.)
+fn remove_unwanted_macro_spans(covspans: &mut Vec<SpanFromMir>) {
+    let mut seen_macro_spans = FxHashSet::default();
+    covspans.retain(|covspan| {
+        // Ignore (retain) non-macro-expansion spans.
+        if covspan.visible_macro.is_none() {
+            return true;
+        }
+
+        // Retain only the first macro-expanded covspan with this span.
+        seen_macro_spans.insert(covspan.span)
+    });
 }
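
`HashSet::insert` returns `false` when the value is already present, so using its return value as the `retain` predicate keeps only the first occurrence of each key. A small sketch of the same keep-first pattern, with `std::collections::HashSet` standing in for `FxHashSet`:

use std::collections::HashSet;

fn main() {
    let mut seen = HashSet::new();
    let mut words = vec!["assert", "foo", "assert", "bar", "assert"];
    // `insert` is true only the first time a given value is seen.
    words.retain(|w| seen.insert(*w));
    assert_eq!(words, vec!["assert", "foo", "bar"]);
}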
 
-impl RefinedCovspan {
-    fn is_mergeable(&self, other: &Self) -> bool {
-        self.bcb == other.bcb
-    }
+/// When a span corresponds to a macro invocation that is visible from the
+/// function body, split it into two parts. The first part covers just the
+/// macro name plus `!`, and the second part covers the rest of the macro
+/// invocation. This seems to give better results for code that uses macros.
+fn split_visible_macro_spans(covspans: &mut Vec<SpanFromMir>) {
+    let mut extra_spans = vec![];
 
-    fn merge_from(&mut self, other: &Self) {
-        debug_assert!(self.is_mergeable(other));
-        self.span = self.span.to(other.span);
+    covspans.retain(|covspan| {
+        let Some(visible_macro) = covspan.visible_macro else { return true };
+
+        let split_len = visible_macro.as_str().len() as u32 + 1;
+        let (before, after) = covspan.span.split_at(split_len);
+        if !covspan.span.contains(before) || !covspan.span.contains(after) {
+            // Something is unexpectedly wrong with the split point.
+            // The debug assertion in `split_at` will have already caught this,
+            // but in release builds it's safer to do nothing and maybe get a
+            // bug report for unexpected coverage, rather than risk an ICE.
+            return true;
+        }
+
+        extra_spans.push(SpanFromMir::new(before, covspan.visible_macro, covspan.bcb));
+        extra_spans.push(SpanFromMir::new(after, covspan.visible_macro, covspan.bcb));
+        false // Discard the original covspan that we just split.
+    });
+
+    // The newly-split spans are added at the end, so any previous sorting
+    // is not preserved.
+    covspans.extend(extra_spans);
+}
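
The split point is the length of the macro name plus one for the `!`, so for a visible `assert!` invocation the first part covers `assert!` and the second covers the parenthesized arguments. A rough string-based sketch of where that split lands (real covspans are byte offsets into the source map, not strings):

fn main() {
    let invocation = "assert!(x > 0)";
    let visible_macro = "assert";
    let split_len = visible_macro.len() + 1; // macro name plus `!`
    let (before, after) = invocation.split_at(split_len);
    assert_eq!(before, "assert!");
    assert_eq!(after, "(x > 0)");
}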
+
+/// Uses the holes to divide the given covspans into buckets, such that:
+/// - No span in any hole overlaps a bucket (truncating the spans if necessary).
+/// - The spans in each bucket are strictly after all spans in previous buckets,
+///   and strictly before all spans in subsequent buckets.
+///
+/// The resulting buckets are sorted relative to each other, but might not be
+/// internally sorted.
+#[instrument(level = "debug")]
+fn divide_spans_into_buckets(input_covspans: Vec<Covspan>, holes: &[Hole]) -> Vec<Vec<Covspan>> {
+    debug_assert!(input_covspans.is_sorted_by(|a, b| compare_spans(a.span, b.span).is_le()));
+    debug_assert!(holes.is_sorted_by(|a, b| compare_spans(a.span, b.span).is_le()));
+
+    // Now we're ready to start carving holes out of the initial coverage spans,
+    // and grouping them in buckets separated by the holes.
+
+    let mut input_covspans = VecDeque::from(input_covspans);
+    let mut fragments = vec![];
+
+    // For each hole:
+    // - Identify the spans that are entirely or partly before the hole.
+    // - Put those spans in a corresponding bucket, truncated to the start of the hole.
+    // - If one of those spans also extends after the hole, put the rest of it
+    //   in a "fragments" vector that is processed by the next hole.
+    let mut buckets = (0..holes.len()).map(|_| vec![]).collect::<Vec<_>>();
+    for (hole, bucket) in holes.iter().zip(&mut buckets) {
+        let fragments_from_prev = std::mem::take(&mut fragments);
+
+        // Only inspect spans that precede or overlap this hole,
+        // leaving the rest to be inspected by later holes.
+        // (This relies on the spans and holes both being sorted.)
+        let relevant_input_covspans =
+            drain_front_while(&mut input_covspans, |c| c.span.lo() < hole.span.hi());
+
+        for covspan in fragments_from_prev.into_iter().chain(relevant_input_covspans) {
+            let (before, after) = covspan.split_around_hole_span(hole.span);
+            bucket.extend(before);
+            fragments.extend(after);
+        }
     }
+
+    // After finding the spans before each hole, any remaining fragments/spans
+    // form their own final bucket, after the final hole.
+    // (If there were no holes, this will just be all of the initial spans.)
+    fragments.extend(input_covspans);
+    buckets.push(fragments);
+
+    buckets
+}
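
To see what the bucketing achieves, it helps to model covspans and holes as half-open integer ranges: each hole truncates the ranges that touch it and acts as a fence between buckets. The sketch below re-derives the same result in that simplified model (plain `(u32, u32)` ranges, no `Covspan`/`Hole` types, and without the fragment queue used by the real pass):

// Spans and holes as half-open (lo, hi) ranges, both lists sorted by lo.
fn divide(spans: &[(u32, u32)], holes: &[(u32, u32)]) -> Vec<Vec<(u32, u32)>> {
    let mut buckets = vec![Vec::new(); holes.len() + 1];
    for &(start, hi) in spans {
        let mut lo = start;
        for (i, &(hole_lo, hole_hi)) in holes.iter().enumerate() {
            if lo >= hole_hi {
                continue; // starts after this hole; look at later holes
            }
            if lo < hole_lo {
                // The part strictly before the hole goes in this hole's bucket.
                buckets[i].push((lo, hi.min(hole_lo)));
            }
            if hi <= hole_hi {
                lo = hi; // nothing survives past the hole
                break;
            }
            lo = hole_hi; // carry the part after the hole to later buckets
        }
        if lo < hi {
            buckets[holes.len()].push((lo, hi)); // after the final hole
        }
    }
    buckets
}

fn main() {
    // One hole at 4..6 splits 2..9 into 2..4 and 6..9, landing in separate buckets.
    let buckets = divide(&[(0, 3), (2, 9), (7, 8)], &[(4, 6)]);
    assert_eq!(buckets, vec![vec![(0, 3), (2, 4)], vec![(6, 9), (7, 8)]]);
}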
+
+/// Similar to `.drain(..)`, but stops just before it would remove an item not
+/// satisfying the predicate.
+fn drain_front_while<'a, T>(
+    queue: &'a mut VecDeque<T>,
+    mut pred_fn: impl FnMut(&T) -> bool,
+) -> impl Iterator<Item = T> + Captures<'a> {
+    std::iter::from_fn(move || if pred_fn(queue.front()?) { queue.pop_front() } else { None })
 }
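
Unlike `drain(..)` or a filtering `retain`, this helper stops at the first element that fails the predicate and leaves everything behind it in place (the `Captures<'a>` bound is just a workaround for `impl Trait` lifetime-capture rules). A small sketch of that stop-at-first-failure behaviour using the same `iter::from_fn` idea:

use std::collections::VecDeque;

fn main() {
    let mut queue = VecDeque::from(vec![1, 2, 3, 10, 4]);

    // Pop from the front only while the predicate holds; stop at the first failure.
    let taken: Vec<i32> = std::iter::from_fn(|| {
        if *queue.front()? < 5 { queue.pop_front() } else { None }
    })
    .collect();

    assert_eq!(taken, vec![1, 2, 3]);
    // 4 is still queued: 10 blocked the front, so later small elements are untouched.
    assert_eq!(queue, VecDeque::from(vec![10, 4]));
}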
 
 /// Takes one of the buckets of (sorted) spans extracted from MIR, and "refines"
-/// those spans by removing spans that overlap in unwanted ways, and by merging
-/// compatible adjacent spans.
+/// those spans by removing spans that overlap in unwanted ways.
 #[instrument(level = "debug")]
-fn refine_sorted_spans(sorted_spans: Vec<SpanFromMir>) -> Vec<RefinedCovspan> {
+fn remove_unwanted_overlapping_spans(sorted_spans: Vec<Covspan>) -> Vec<Covspan> {
+    debug_assert!(sorted_spans.is_sorted_by(|a, b| compare_spans(a.span, b.span).is_le()));
+
     // Holds spans that have been read from the input vector, but haven't yet
     // been committed to the output vector.
     let mut pending = vec![];
     let mut refined = vec![];
 
     for curr in sorted_spans {
-        pending.retain(|prev: &SpanFromMir| {
+        pending.retain(|prev: &Covspan| {
             if prev.span.hi() <= curr.span.lo() {
                 // There's no overlap between the previous/current covspans,
                 // so move the previous one into the refined list.
-                refined.push(RefinedCovspan { span: prev.span, bcb: prev.bcb });
+                refined.push(prev.clone());
                 false
             } else {
                 // Otherwise, retain the previous covspan only if it has the
@@ -75,22 +212,57 @@ fn refine_sorted_spans(sorted_spans: Vec<SpanFromMir>) -> Vec<RefinedCovspan> {
     }
 
     // Drain the rest of the pending list into the refined list.
-    for prev in pending {
-        refined.push(RefinedCovspan { span: prev.span, bcb: prev.bcb });
+    refined.extend(pending);
+    refined
+}
+
+#[derive(Clone, Debug)]
+struct Covspan {
+    span: Span,
+    bcb: BasicCoverageBlock,
+}
+
+impl Covspan {
+    /// Splits this covspan into 0-2 parts:
+    /// - The part that is strictly before the hole span, if any.
+    /// - The part that is strictly after the hole span, if any.
+    fn split_around_hole_span(&self, hole_span: Span) -> (Option<Self>, Option<Self>) {
+        let before = try {
+            let span = self.span.trim_end(hole_span)?;
+            Self { span, ..*self }
+        };
+        let after = try {
+            let span = self.span.trim_start(hole_span)?;
+            Self { span, ..*self }
+        };
+
+        (before, after)
     }
 
-    // Do one last merge pass, to simplify the output.
-    debug!(?refined, "before merge");
-    refined.dedup_by(|b, a| {
-        if a.is_mergeable(b) {
-            debug!(?a, ?b, "merging list-adjacent refined spans");
-            a.merge_from(b);
-            true
-        } else {
-            false
+    /// If `self` and `other` can be merged (i.e. they have the same BCB),
+    /// mutates `self.span` to also include `other.span` and returns true.
+    ///
+    /// Note that compatible covspans can be merged even if their underlying
+    /// spans are not overlapping/adjacent; any space between them will also be
+    /// part of the merged covspan.
+    fn merge_if_eligible(&mut self, other: &Self) -> bool {
+        if self.bcb != other.bcb {
+            return false;
         }
-    });
-    debug!(?refined, "after merge");
 
-    refined
+        self.span = self.span.to(other.span);
+        true
+    }
+}
+
+/// Compares two spans in (lo ascending, hi descending) order.
+fn compare_spans(a: Span, b: Span) -> std::cmp::Ordering {
+    // First sort by span start.
+    Ord::cmp(&a.lo(), &b.lo())
+        // If span starts are the same, sort by span end in reverse order.
+        // This ensures that if spans A and B are adjacent in the list,
+        // and they overlap but are not equal, then either:
+        // - Span A extends further left, or
+        // - Both have the same start and span A extends further right
+        .then_with(|| Ord::cmp(&a.hi(), &b.hi()).reverse())
 }
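
The (lo ascending, hi descending) order means that of two spans starting at the same position, the longer one sorts first, so a span sorts before any distinct span it encloses. The same ordering expressed on plain (lo, hi) pairs with `std::cmp::Reverse`:

use std::cmp::Reverse;

fn main() {
    let mut spans: Vec<(u32, u32)> = vec![(3, 5), (0, 2), (3, 9), (0, 7)];
    spans.sort_by_key(|&(lo, hi)| (lo, Reverse(hi)));
    // Equal starts: the span that extends further right comes first.
    assert_eq!(spans, vec![(0, 7), (0, 2), (3, 9), (3, 5)]);
}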