@@ -107,25 +107,30 @@ where
     }
 }

-pub trait ToAttrTokenStream: sync::DynSend + sync::DynSync {
-    fn to_attr_token_stream(&self) -> AttrTokenStream;
-}
-
-impl ToAttrTokenStream for AttrTokenStream {
-    fn to_attr_token_stream(&self) -> AttrTokenStream {
-        self.clone()
-    }
-}
-
-/// A lazy version of [`TokenStream`], which defers creation
-/// of an actual `TokenStream` until it is needed.
-/// `Box` is here only to reduce the structure size.
+/// A lazy version of [`AttrTokenStream`], which defers creation of an actual
+/// `AttrTokenStream` until it is needed.
 #[derive(Clone)]
-pub struct LazyAttrTokenStream(Arc<Box<dyn ToAttrTokenStream>>);
+pub struct LazyAttrTokenStream(Arc<LazyAttrTokenStreamInner>);

 impl LazyAttrTokenStream {
-    pub fn new(inner: impl ToAttrTokenStream + 'static) -> LazyAttrTokenStream {
-        LazyAttrTokenStream(Arc::new(Box::new(inner)))
+    pub fn new_direct(stream: AttrTokenStream) -> LazyAttrTokenStream {
+        LazyAttrTokenStream(Arc::new(LazyAttrTokenStreamInner::Direct(stream)))
+    }
+
+    pub fn new_pending(
+        start_token: (Token, Spacing),
+        cursor_snapshot: TokenCursor,
+        num_calls: u32,
+        break_last_token: u32,
+        node_replacements: Box<[NodeReplacement]>,
+    ) -> LazyAttrTokenStream {
+        LazyAttrTokenStream(Arc::new(LazyAttrTokenStreamInner::Pending {
+            start_token,
+            cursor_snapshot,
+            num_calls,
+            break_last_token,
+            node_replacements,
+        }))
     }

     pub fn to_attr_token_stream(&self) -> AttrTokenStream {
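
Aside: the `Direct`/`Pending` split above is a two-state lazy value behind an
`Arc`. Here is a minimal standalone sketch of that pattern; `Stream`, `Inner`,
`Lazy`, and `force` are hypothetical stand-ins, not the real rustc types:

    use std::sync::Arc;

    // Hypothetical stand-in for the expensive-to-build token stream.
    #[derive(Clone, Debug)]
    struct Stream(Vec<u32>);

    enum Inner {
        // The stream already exists; just clone it on demand.
        Direct(Stream),
        // Enough state to recompute the stream later, if anyone asks.
        Pending { seed: Vec<u32> },
    }

    // Cloning copies only the `Arc`, never an intermediate buffer.
    #[derive(Clone)]
    struct Lazy(Arc<Inner>);

    impl Lazy {
        fn new_direct(stream: Stream) -> Lazy {
            Lazy(Arc::new(Inner::Direct(stream)))
        }

        fn new_pending(seed: Vec<u32>) -> Lazy {
            Lazy(Arc::new(Inner::Pending { seed }))
        }

        // The expensive work happens here, and only if this is ever called.
        fn force(&self) -> Stream {
            match &*self.0 {
                Inner::Direct(stream) => stream.clone(),
                Inner::Pending { seed } => Stream(seed.iter().map(|x| x * 2).collect()),
            }
        }
    }

    fn main() {
        let eager = Lazy::new_direct(Stream(vec![9]));
        let lazy = Lazy::new_pending(vec![1, 2, 3]);
        let cheap_copy = lazy.clone(); // clones the Arc, not the data
        println!("{:?} {:?}", eager.force(), cheap_copy.force());
    }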
@@ -208,91 +213,118 @@ impl NodeRange {
     }
 }

-// From a value of this type we can reconstruct the `TokenStream` seen by the
-// `f` callback passed to a call to `Parser::collect_tokens`, by
-// replaying the getting of the tokens. This saves us producing a `TokenStream`
-// if it is never needed, e.g. a captured `macro_rules!` argument that is never
-// passed to a proc macro. In practice, token stream creation happens rarely
-// compared to calls to `collect_tokens` (see some statistics in #78736) so we
-// are doing as little up-front work as possible.
-//
-// This also makes `Parser` very cheap to clone, since
-// there is no intermediate collection buffer to clone.
 pub struct LazyAttrTokenStreamImpl {
+    // njn: all still pub?
     pub start_token: (Token, Spacing),
     pub cursor_snapshot: TokenCursor,
     pub num_calls: u32,
     pub break_last_token: u32,
     pub node_replacements: Box<[NodeReplacement]>,
 }

-impl ToAttrTokenStream for LazyAttrTokenStreamImpl {
+enum LazyAttrTokenStreamInner {
+    // The token stream has already been produced.
+    Direct(AttrTokenStream),
+
+    // From a value of this type we can reconstruct the `TokenStream` seen by
+    // the `f` callback passed to a call to `Parser::collect_tokens`, by
+    // replaying the getting of the tokens. This saves us producing a
+    // `TokenStream` if it is never needed, e.g. a captured `macro_rules!`
+    // argument that is never passed to a proc macro. In practice, token stream
+    // creation happens rarely compared to calls to `collect_tokens` (see some
+    // statistics in #78736) so we are doing as little up-front work as
+    // possible.
+    //
+    // This also makes `Parser` very cheap to clone, since there is no
+    // intermediate collection buffer to clone.
+    Pending {
+        start_token: (Token, Spacing),
+        cursor_snapshot: TokenCursor,
+        num_calls: u32,
+        break_last_token: u32,
+        node_replacements: Box<[NodeReplacement]>,
+    },
+}
+
+impl LazyAttrTokenStreamInner {
     fn to_attr_token_stream(&self) -> AttrTokenStream {
-        // The token produced by the final call to `{,inlined_}next` was not
-        // actually consumed by the callback. The combination of chaining the
-        // initial token and using `take` produces the desired result - we
-        // produce an empty `TokenStream` if no calls were made, and omit the
-        // final token otherwise.
-        let mut cursor_snapshot = self.cursor_snapshot.clone();
-        let tokens = iter::once(FlatToken::Token(self.start_token))
-            .chain(iter::repeat_with(|| FlatToken::Token(cursor_snapshot.next())))
-            .take(self.num_calls as usize);
-
-        if self.node_replacements.is_empty() {
-            make_attr_token_stream(tokens, self.break_last_token)
-        } else {
-            let mut tokens: Vec<_> = tokens.collect();
-            let mut node_replacements = self.node_replacements.to_vec();
-            node_replacements.sort_by_key(|(range, _)| range.0.start);
+        match self {
+            LazyAttrTokenStreamInner::Direct(stream) => stream.clone(),
+            LazyAttrTokenStreamInner::Pending {
+                start_token,
+                cursor_snapshot,
+                num_calls,
+                break_last_token,
+                node_replacements,
+            } => {
+                // The token produced by the final call to `{,inlined_}next` was not
+                // actually consumed by the callback. The combination of chaining the
+                // initial token and using `take` produces the desired result - we
+                // produce an empty `TokenStream` if no calls were made, and omit the
+                // final token otherwise.
+                let mut cursor_snapshot = cursor_snapshot.clone();
+                let tokens = iter::once(FlatToken::Token(*start_token))
+                    .chain(iter::repeat_with(|| FlatToken::Token(cursor_snapshot.next())))
+                    .take(*num_calls as usize);
+
+                if node_replacements.is_empty() {
+                    make_attr_token_stream(tokens, *break_last_token)
+                } else {
+                    let mut tokens: Vec<_> = tokens.collect();
+                    let mut node_replacements = node_replacements.to_vec();
+                    node_replacements.sort_by_key(|(range, _)| range.0.start);

-            #[cfg(debug_assertions)]
-            for [(node_range, tokens), (next_node_range, next_tokens)] in
-                node_replacements.array_windows()
-            {
-                assert!(
-                    node_range.0.end <= next_node_range.0.start
-                        || node_range.0.end >= next_node_range.0.end,
-                    "Node ranges should be disjoint or nested: ({:?}, {:?}) ({:?}, {:?})",
-                    node_range,
-                    tokens,
-                    next_node_range,
-                    next_tokens,
-                );
-            }
+                    #[cfg(debug_assertions)]
+                    for [(node_range, tokens), (next_node_range, next_tokens)] in
+                        node_replacements.array_windows()
+                    {
+                        assert!(
+                            node_range.0.end <= next_node_range.0.start
+                                || node_range.0.end >= next_node_range.0.end,
+                            "Node ranges should be disjoint or nested: ({:?}, {:?}) ({:?}, {:?})",
+                            node_range,
+                            tokens,
+                            next_node_range,
+                            next_tokens,
+                        );
+                    }

-            // Process the replace ranges, starting from the highest start
-            // position and working our way back. If we have tokens like:
-            //
-            // `#[cfg(FALSE)] struct Foo { #[cfg(FALSE)] field: bool }`
-            //
-            // Then we will generate replace ranges for both
-            // the `#[cfg(FALSE)] field: bool` and the entire
-            // `#[cfg(FALSE)] struct Foo { #[cfg(FALSE)] field: bool }`
-            //
-            // By starting processing from the replace range with the greatest
-            // start position, we ensure that any (outer) replace range which
-            // encloses another (inner) replace range will fully overwrite the
-            // inner range's replacement.
-            for (node_range, target) in node_replacements.into_iter().rev() {
-                assert!(
-                    !node_range.0.is_empty(),
-                    "Cannot replace an empty node range: {:?}",
-                    node_range.0
-                );
-
-                // Replace the tokens in range with zero or one `FlatToken::AttrsTarget`s, plus
-                // enough `FlatToken::Empty`s to fill up the rest of the range. This keeps the
-                // total length of `tokens` constant throughout the replacement process, allowing
-                // us to do all replacements without adjusting indices.
-                let target_len = target.is_some() as usize;
-                tokens.splice(
-                    (node_range.0.start as usize)..(node_range.0.end as usize),
-                    target.into_iter().map(|target| FlatToken::AttrsTarget(target)).chain(
-                        iter::repeat(FlatToken::Empty).take(node_range.0.len() - target_len),
-                    ),
-                );
+                    // Process the replace ranges, starting from the highest start
+                    // position and working our way back. If we have tokens like:
+                    //
+                    // `#[cfg(FALSE)] struct Foo { #[cfg(FALSE)] field: bool }`
+                    //
+                    // Then we will generate replace ranges for both
+                    // the `#[cfg(FALSE)] field: bool` and the entire
+                    // `#[cfg(FALSE)] struct Foo { #[cfg(FALSE)] field: bool }`
+                    //
+                    // By starting processing from the replace range with the greatest
+                    // start position, we ensure that any (outer) replace range which
+                    // encloses another (inner) replace range will fully overwrite the
+                    // inner range's replacement.
+                    for (node_range, target) in node_replacements.into_iter().rev() {
+                        assert!(
+                            !node_range.0.is_empty(),
+                            "Cannot replace an empty node range: {:?}",
+                            node_range.0
+                        );
+
+                        // Replace the tokens in range with zero or one `FlatToken::AttrsTarget`s,
+                        // plus enough `FlatToken::Empty`s to fill up the rest of the range. This
+                        // keeps the total length of `tokens` constant throughout the replacement
+                        // process, allowing us to do all replacements without adjusting indices.
+                        let target_len = target.is_some() as usize;
+                        tokens.splice(
+                            (node_range.0.start as usize)..(node_range.0.end as usize),
+                            target.into_iter().map(|target| FlatToken::AttrsTarget(target)).chain(
+                                iter::repeat(FlatToken::Empty)
+                                    .take(node_range.0.len() - target_len),
+                            ),
+                        );
+                    }
+                    make_attr_token_stream(tokens.into_iter(), *break_last_token)
+                }
             }
-            make_attr_token_stream(tokens.into_iter(), self.break_last_token)
         }
     }
 }
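
Aside: the `Pending` arm combines two tricks worth seeing in isolation: the
replay via `once(..).chain(repeat_with(..)).take(n)`, and the splice of each
replace range with one target plus `Empty` padding so the buffer length never
changes. A self-contained sketch, with plain integers as hypothetical
stand-ins for tokens:

    use std::iter;

    // Toy flat tokens standing in for `FlatToken::{Token, AttrsTarget, Empty}`.
    #[derive(Clone, Debug)]
    enum Flat {
        Token(u32),
        Target(&'static str),
        Empty,
    }

    fn main() {
        // Replay: chain a start token onto a repeatedly-polled source, then
        // `take` the recorded number of calls. Zero calls yields an empty
        // buffer; otherwise the final, unconsumed token is omitted.
        let mut next = 0u32;
        let mut tokens: Vec<Flat> = iter::once(Flat::Token(100))
            .chain(iter::repeat_with(|| {
                next += 1;
                Flat::Token(next)
            }))
            .take(6)
            .collect();

        // Replace range [2, 5) with one target plus padding. The padding
        // keeps `tokens.len()` constant, so ranges processed later (which
        // start earlier) need no index adjustment.
        let (start, end) = (2usize, 5usize);
        let target = Some("attrs-target");
        let target_len = target.is_some() as usize;
        let before = tokens.len();
        tokens.splice(
            start..end,
            target
                .into_iter()
                .map(Flat::Target)
                .chain(iter::repeat(Flat::Empty).take(end - start - target_len)),
        );
        assert_eq!(tokens.len(), before);
        println!("{tokens:?}");
    }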
@@ -1011,6 +1043,7 @@ mod size_asserts {
     static_assert_size!(AttrTokenStream, 8);
     static_assert_size!(AttrTokenTree, 32);
     static_assert_size!(LazyAttrTokenStream, 8);
+    static_assert_size!(LazyAttrTokenStreamInner, 96);
     static_assert_size!(Option<LazyAttrTokenStream>, 8); // must be small, used in many AST nodes
     static_assert_size!(TokenStream, 8);
     static_assert_size!(TokenTree, 32);
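
Aside: `static_assert_size!` comes from rustc's internal helpers; a minimal
sketch of how such a compile-time size check can be written in plain Rust
(the example assertion assumes a 64-bit target):

    // The array types only unify if the two lengths agree, so a size
    // regression becomes a compile error rather than a silent change.
    macro_rules! static_assert_size {
        ($ty:ty, $size:expr) => {
            const _: [(); $size] = [(); ::std::mem::size_of::<$ty>()];
        };
    }

    // Niche optimization keeps `Option<Box<u64>>` pointer-sized on 64-bit.
    static_assert_size!(Option<Box<u64>>, 8);

    fn main() {}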