@@ -505,25 +505,23 @@ fn inner_parse_loop<'root, 'tt>(
                         item.idx += 1;
                         next_items.push(item);
                     }
-                }
-                // We don't need a separator. Move the "dot" back to the beginning of the matcher
-                // and try to match again UNLESS we are only allowed to have _one_ repetition.
-                else if item.seq_op != Some(mbe::KleeneOp::ZeroOrOne) {
+                } else if item.seq_op != Some(mbe::KleeneOp::ZeroOrOne) {
+                    // We don't need a separator. Move the "dot" back to the beginning of the
+                    // matcher and try to match again UNLESS we are only allowed to have _one_
+                    // repetition.
                     item.match_cur = item.match_lo;
                     item.idx = 0;
                     cur_items.push(item);
                 }
-            }
-            // If we are not in a repetition, then being at the end of a matcher means that we have
-            // reached the potential end of the input.
-            else {
+            } else {
+                // If we are not in a repetition, then being at the end of a matcher means that we
+                // have reached the potential end of the input.
                 eof_items.push(item);
             }
-        }
-        // We are in the middle of a matcher.
-        else {
-            // Look at what token in the matcher we are trying to match the current token (`token`)
-            // against. Depending on that, we may generate new items.
+        } else {
+            // We are in the middle of a matcher. Look at what token in the matcher we are trying
+            // to match the current token (`token`) against. Depending on that, we may generate new
+            // items.
             match item.top_elts.get_tt(idx) {
                 // Need to descend into a sequence
                 TokenTree::Sequence(sp, seq) => {
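The branches reshuffled above decide where a matcher item goes once its "dot" reaches the end of a repeated sub-matcher: advance past a matching separator (`next_items`), loop back for another repetition (`cur_items`), or, when the item is not inside a repetition at all, record a potential end of input (`eof_items`). The sketch below restates that decision in isolation; `KleeneOp`, `Disposition`, and `at_end_of_matcher` are simplified stand-ins rather than rustc_expand definitions, and the boolean parameters summarize checks (`item.up.is_some()`, `item.sep.is_some()`, the separator/token comparison) that sit just outside this hunk.

```rust
// Simplified stand-ins for the matcher state; not the real rustc_expand types.
#[derive(Clone, Copy, PartialEq)]
enum KleeneOp {
    ZeroOrMore, // `*`
    OneOrMore,  // `+`
    ZeroOrOne,  // `?`
}

/// Where an item goes once its "dot" sits at the end of its matcher.
#[derive(Debug, PartialEq)]
enum Disposition {
    NextItems, // the separator is the current token: advance past it
    CurItems,  // move the "dot" back to the start and try another repetition
    EofItems,  // not inside a repetition: potential end of the macro input
    Dropped,   // the item cannot make progress and goes no further
}

fn at_end_of_matcher(
    in_repetition: bool,     // `item.up.is_some()` in the real code
    has_separator: bool,     // `item.sep.is_some()`
    separator_matches: bool, // the separator equals the current input token
    seq_op: Option<KleeneOp>,
) -> Disposition {
    if !in_repetition {
        // Being at the end of the matcher here is a potential end of the input.
        Disposition::EofItems
    } else if has_separator {
        // A separator is required; the item survives only if it is the current token.
        if separator_matches { Disposition::NextItems } else { Disposition::Dropped }
    } else if seq_op != Some(KleeneOp::ZeroOrOne) {
        // No separator needed: try another repetition, unless only one is allowed (`?`).
        Disposition::CurItems
    } else {
        Disposition::Dropped
    }
}

fn main() {
    // `$($e:expr),*` waiting on its `,` separator vs. `$($e:expr)*` looping straight back.
    assert_eq!(
        at_end_of_matcher(true, true, true, Some(KleeneOp::ZeroOrMore)),
        Disposition::NextItems
    );
    assert_eq!(
        at_end_of_matcher(true, false, false, Some(KleeneOp::ZeroOrMore)),
        Disposition::CurItems
    );
    assert_eq!(at_end_of_matcher(false, false, false, None), Disposition::EofItems);
}
```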
@@ -666,17 +664,14 @@ pub(super) fn parse_tt(
         // If we reached the EOF, check that there is EXACTLY ONE possible matcher. Otherwise,
         // either the parse is ambiguous (which should never happen) or there is a syntax error.
         if parser.token == token::Eof {
-            if eof_items.len() == 1 {
+            return if eof_items.len() == 1 {
                 let matches =
                     eof_items[0].matches.iter_mut().map(|dv| Lrc::make_mut(dv).pop().unwrap());
-                return nameize(parser.sess, ms, matches);
+                nameize(parser.sess, ms, matches)
             } else if eof_items.len() > 1 {
-                return Error(
-                    parser.token.span,
-                    "ambiguity: multiple successful parses".to_string(),
-                );
+                Error(parser.token.span, "ambiguity: multiple successful parses".to_string())
             } else {
-                return Failure(
+                Failure(
                     Token::new(
                         token::Eof,
                         if parser.token.span.is_dummy() {
@@ -686,8 +681,8 @@ pub(super) fn parse_tt(
                         },
                     ),
                     "missing tokens in macro arguments",
-                );
-            }
+                )
+            };
         }
         // Performance hack: eof_items may share matchers via Rc with other things that we want
         // to modify. Dropping eof_items now may drop these refcounts to 1, preventing an
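The "Performance hack" comment in the trailing context relies on the clone-on-write behaviour behind the `Lrc::make_mut(dv)` call a few lines up (`Lrc` is rustc's reference-counted pointer alias): with a reference count of 1 the value is mutated in place, otherwise it is cloned first. A minimal demonstration, using `std::rc::Rc` as a stand-in for `Lrc`:

```rust
use std::rc::Rc;

fn main() {
    // Two handles to the same vector, much as `eof_items` might still share matches
    // with the surviving matcher items in `parse_tt`.
    let mut cow = Rc::new(vec![1, 2, 3]);
    let other_handle = Rc::clone(&cow);

    // With two handles alive, `make_mut` must clone the vector before handing out a
    // mutable reference -- an extra allocation.
    Rc::make_mut(&mut cow).push(4);
    assert_eq!(*other_handle, vec![1, 2, 3]); // the other handle is untouched

    // Dropping the extra handle first brings the refcount back to 1, so the next
    // `make_mut` mutates in place with no clone. That is why the code above drops
    // `eof_items` before continuing to mutate the shared matchers.
    drop(other_handle);
    Rc::make_mut(&mut cow).push(5);
    assert_eq!(*cow, vec![1, 2, 3, 4, 5]);
}
```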
@@ -699,9 +694,10 @@ pub(super) fn parse_tt(
         if bb_items.is_empty() && next_items.is_empty() {
             return Failure(parser.token.clone(), "no rules expected this token in macro call");
         }
-        // Another possibility is that we need to call out to parse some rust nonterminal
-        // (black-box) parser. However, if there is not EXACTLY ONE of these, something is wrong.
-        else if (!bb_items.is_empty() && !next_items.is_empty()) || bb_items.len() > 1 {
+
+        if (!bb_items.is_empty() && !next_items.is_empty()) || bb_items.len() > 1 {
+            // We need to call out to parse some rust nonterminal (black-box) parser. But something
+            // is wrong, because there is not EXACTLY ONE of these.
             let nts = bb_items
                 .iter()
                 .map(|item| match item.top_elts.get_tt(item.idx) {
@@ -723,15 +719,15 @@ pub(super) fn parse_tt(
                 ),
             );
         }
-        // Dump all possible `next_items` into `cur_items` for the next iteration.
-        else if !next_items.is_empty() {
-            // Now process the next token
+
+        if !next_items.is_empty() {
+            // Dump all possible `next_items` into `cur_items` for the next iteration. Then process
+            // the next token.
             cur_items.extend(next_items.drain(..));
             parser.to_mut().bump();
-        }
-        // Finally, we have the case where we need to call the black-box parser to get some
-        // nonterminal.
-        else {
+        } else {
+            // Finally, we have the case where we need to call the black-box parser to get some
+            // nonterminal.
             assert_eq!(bb_items.len(), 1);
 
             let mut item = bb_items.pop().unwrap();
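Together with the `eof_items` handling earlier, these hunks leave `parse_tt` with a fixed triage order at the end of each loop iteration: fail if nothing can make progress, report ambiguity if more than one black-box parse is in play, otherwise either advance the token or hand exactly one item to the Rust parser. The schematic below only illustrates that order, with plain counts in place of the real item vectors; `Step` and `dispatch` are not rustc_expand names.

```rust
// Illustrative only: the real code works on vectors of matcher items, not counts.
#[derive(Debug, PartialEq)]
enum Step {
    Failure,      // nothing can make progress: "no rules expected this token"
    Ambiguity,    // black-box parses compete with each other or with token matches
    AdvanceToken, // move `next_items` into `cur_items` and bump the parser
    CallBlackBox, // exactly one nonterminal left: invoke the Rust (black-box) parser
}

fn dispatch(bb_items: usize, next_items: usize) -> Step {
    if bb_items == 0 && next_items == 0 {
        Step::Failure
    } else if (bb_items > 0 && next_items > 0) || bb_items > 1 {
        Step::Ambiguity
    } else if next_items > 0 {
        Step::AdvanceToken
    } else {
        // At this point `bb_items == 1`, mirroring the `assert_eq!` in the last hunk.
        Step::CallBlackBox
    }
}

fn main() {
    assert_eq!(dispatch(0, 0), Step::Failure);
    assert_eq!(dispatch(1, 2), Step::Ambiguity);
    assert_eq!(dispatch(0, 3), Step::AdvanceToken);
    assert_eq!(dispatch(1, 0), Step::CallBlackBox);
}
```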