+use std::collections::VecDeque;
+
+use rustc_data_structures::captures::Captures;
+use rustc_data_structures::fx::FxHashSet;
 use rustc_middle::mir;
 use rustc_span::Span;
 
 use crate::coverage::graph::{BasicCoverageBlock, CoverageGraph};
 use crate::coverage::mappings;
-use crate::coverage::spans::from_mir::SpanFromMir;
+use crate::coverage::spans::from_mir::{
+    extract_covspans_and_holes_from_mir, ExtractedCovspans, SpanFromMir,
+};
 use crate::coverage::ExtractedHirInfo;
 
 mod from_mir;
@@ -19,9 +25,68 @@ pub(super) fn extract_refined_covspans(
     basic_coverage_blocks: &CoverageGraph,
     code_mappings: &mut impl Extend<mappings::CodeMapping>,
 ) {
-    let buckets =
-        from_mir::mir_to_initial_sorted_coverage_spans(mir_body, hir_info, basic_coverage_blocks);
-    for covspans in buckets {
+    let ExtractedCovspans { mut covspans, mut holes } =
+        extract_covspans_and_holes_from_mir(mir_body, hir_info, basic_coverage_blocks);
+
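+    // Sorting in BCB dominator order matters here, because
+    // `remove_unwanted_macro_spans` keeps only the first covspan it sees for
+    // each macro span (see its doc comment below).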
+    covspans.sort_by(|a, b| basic_coverage_blocks.cmp_in_dominator_order(a.bcb, b.bcb));
+    remove_unwanted_macro_spans(&mut covspans);
+    split_visible_macro_spans(&mut covspans);
+
+    let compare_covspans = |a: &SpanFromMir, b: &SpanFromMir| {
+        compare_spans(a.span, b.span)
+            // After deduplication, we want to keep only the most-dominated BCB.
+            .then_with(|| basic_coverage_blocks.cmp_in_dominator_order(a.bcb, b.bcb).reverse())
+    };
+    covspans.sort_by(compare_covspans);
+
+    // Among covspans with the same span, keep only one,
+    // preferring the one with the most-dominated BCB.
+    // (Ideally we should try to preserve _all_ non-dominating BCBs, but that
+    // requires a lot more complexity in the span refiner, for little benefit.)
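+    // (`Vec::dedup_by` passes the later of each adjacent pair first, so `b`
+    // below is the candidate for removal and `a` is the covspan that is kept.)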
+    covspans.dedup_by(|b, a| a.span.source_equal(b.span));
+
+    // Sort the holes, and merge overlapping/adjacent holes.
+    holes.sort_by(|a, b| compare_spans(a.span, b.span));
+    holes.dedup_by(|b, a| a.merge_if_overlapping_or_adjacent(b));
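+    // (For example, hypothetical adjacent holes covering bytes 5..7 and 7..9
+    // would be merged into a single hole covering 5..9.)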
51+
52+ // Now we're ready to start carving holes out of the initial coverage spans,
53+ // and grouping them in buckets separated by the holes.
54+
55+ let mut input_covspans = VecDeque :: from ( covspans) ;
56+ let mut fragments: Vec < SpanFromMir > = vec ! [ ] ;
57+
58+ // For each hole:
59+ // - Identify the spans that are entirely or partly before the hole.
60+ // - Put those spans in a corresponding bucket, truncated to the start of the hole.
61+ // - If one of those spans also extends after the hole, put the rest of it
62+ // in a "fragments" vector that is processed by the next hole.
63+ let mut buckets = ( 0 ..holes. len ( ) ) . map ( |_| vec ! [ ] ) . collect :: < Vec < _ > > ( ) ;
64+ for ( hole, bucket) in holes. iter ( ) . zip ( & mut buckets) {
65+ let fragments_from_prev = std:: mem:: take ( & mut fragments) ;
66+
67+ // Only inspect spans that precede or overlap this hole,
68+ // leaving the rest to be inspected by later holes.
69+ // (This relies on the spans and holes both being sorted.)
70+ let relevant_input_covspans =
71+ drain_front_while ( & mut input_covspans, |c| c. span . lo ( ) < hole. span . hi ( ) ) ;
72+
73+ for covspan in fragments_from_prev. into_iter ( ) . chain ( relevant_input_covspans) {
74+ let ( before, after) = covspan. split_around_hole_span ( hole. span ) ;
75+ bucket. extend ( before) ;
76+ fragments. extend ( after) ;
77+ }
78+ }
79+
80+ // After finding the spans before each hole, any remaining fragments/spans
81+ // form their own final bucket, after the final hole.
82+ // (If there were no holes, this will just be all of the initial spans.)
83+ fragments. extend ( input_covspans) ;
84+ buckets. push ( fragments) ;
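+    // As a worked illustration (hypothetical byte ranges): given covspans
+    // 0..4, 3..9, and 12..15 with a single hole at 5..7, the first bucket
+    // receives 0..4 and 3..5 (the part of 3..9 before the hole), the 7..9
+    // remainder carries over as a fragment, and the final bucket receives
+    // 7..9 and 12..15.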
85+
86+ for mut covspans in buckets {
87+ // Make sure each individual bucket is internally sorted.
88+ covspans. sort_by ( compare_covspans) ;
89+
2590 let covspans = refine_sorted_spans ( covspans) ;
2691 code_mappings. extend ( covspans. into_iter ( ) . map ( |RefinedCovspan { span, bcb } | {
2792 // Each span produced by the refiner represents an ordinary code region.
@@ -30,6 +95,56 @@ pub(super) fn extract_refined_covspans(
     }
 }
 
+/// Macros that expand into branches (e.g. `assert!`, `trace!`) tend to generate
+/// multiple condition/consequent blocks that have the span of the whole macro
+/// invocation, which is unhelpful. Keeping only the first such span seems to
+/// give better mappings, so remove the others.
+///
+/// (The input spans should be sorted in BCB dominator order, so that the
+/// retained "first" span is likely to dominate the others.)
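+///
+/// (For instance, a single hypothetical `assert!(cond)` can produce several
+/// covspans that all cover the entire `assert!(cond)` call; only the first
+/// of them is kept.)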
+fn remove_unwanted_macro_spans(covspans: &mut Vec<SpanFromMir>) {
+    let mut seen_macro_spans = FxHashSet::default();
+    covspans.retain(|covspan| {
+        // Ignore (retain) non-macro-expansion spans.
+        if covspan.visible_macro.is_none() {
+            return true;
+        }
+
+        // Retain only the first macro-expanded covspan with this span.
+        seen_macro_spans.insert(covspan.span)
+    });
+}
+
+/// When a span corresponds to a macro invocation that is visible from the
+/// function body, split it into two parts. The first part covers just the
+/// macro name plus `!`, and the second part covers the rest of the macro
+/// invocation. This seems to give better results for code that uses macros.
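+///
+/// (For example, a hypothetical covspan for `println!("hi")` would be split
+/// into one covspan for `println!` and another for `("hi")`.)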
+fn split_visible_macro_spans(covspans: &mut Vec<SpanFromMir>) {
+    let mut extra_spans = vec![];
+
+    covspans.retain(|covspan| {
+        let Some(visible_macro) = covspan.visible_macro else { return true };
+
+        let split_len = visible_macro.as_str().len() as u32 + 1;
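+        // (The `+ 1` accounts for the `!` that follows the macro name.)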
+        let (before, after) = covspan.span.split_at(split_len);
+        if !covspan.span.contains(before) || !covspan.span.contains(after) {
+            // Something is unexpectedly wrong with the split point.
+            // The debug assertion in `split_at` will have already caught this,
+            // but in release builds it's safer to do nothing and maybe get a
+            // bug report for unexpected coverage, rather than risk an ICE.
+            return true;
+        }
+
+        extra_spans.push(SpanFromMir::new(before, covspan.visible_macro, covspan.bcb));
+        extra_spans.push(SpanFromMir::new(after, covspan.visible_macro, covspan.bcb));
+        false // Discard the original covspan that we just split.
+    });
+
+    // The newly-split spans are added at the end, so any previous sorting
+    // is not preserved.
+    covspans.extend(extra_spans);
+}
+
 #[derive(Debug)]
 struct RefinedCovspan {
     span: Span,
@@ -47,6 +162,15 @@ impl RefinedCovspan {
     }
 }
 
+/// Similar to `.drain(..)`, but stops just before it would remove an item not
+/// satisfying the predicate.
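+///
+/// (Illustration with plain integers rather than covspans: draining a
+/// `VecDeque` holding `[1, 2, 9, 3]` with predicate `|&x| x < 5` yields
+/// 1 and 2, stops at 9, and leaves `[9, 3]` in the queue. The returned
+/// iterator is lazy, so items are only removed as it is consumed.)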
+fn drain_front_while<'a, T>(
+    queue: &'a mut VecDeque<T>,
+    mut pred_fn: impl FnMut(&T) -> bool,
+) -> impl Iterator<Item = T> + Captures<'a> {
+    std::iter::from_fn(move || if pred_fn(queue.front()?) { queue.pop_front() } else { None })
+}
+
 /// Takes one of the buckets of (sorted) spans extracted from MIR, and "refines"
 /// those spans by removing spans that overlap in unwanted ways, and by merging
 /// compatible adjacent spans.
@@ -94,3 +218,15 @@ fn refine_sorted_spans(sorted_spans: Vec<SpanFromMir>) -> Vec<RefinedCovspan> {
 
     refined
 }
+
+/// Compares two spans in (lo ascending, hi descending) order.
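+///
+/// (Illustration with hypothetical byte offsets: spans 2..9, 2..5, and 4..8
+/// are already in this order; equal `lo` values place the longer span first,
+/// so an enclosing span always sorts before any span nested inside it.)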
+fn compare_spans(a: Span, b: Span) -> std::cmp::Ordering {
+    // First sort by span start.
+    Ord::cmp(&a.lo(), &b.lo())
+        // If span starts are the same, sort by span end in reverse order.
+        // This ensures that if spans A and B are adjacent in the list,
+        // and they overlap but are not equal, then either:
+        // - Span A extends further left, or
+        // - Both have the same start and span A extends further right
+        .then_with(|| Ord::cmp(&a.hi(), &b.hi()).reverse())
+}