11//! Low-level Rust lexer.
22//!
3+ //! The idea with `librustc_lexer` is to make a reusable library,
4+ //! by separating out pure lexing and rustc-specific concerns, like spans,
5+ //! error reporting and interning. So, rustc_lexer operates directly on `&str`,
6+ //! produces simple tokens which are a pair of type-tag and a bit of original text,
7+ //! and does not report errors, instead storing them as flags on the token.
8+ //!
39//! Tokens produced by this lexer are not yet ready for parsing the Rust syntax,
410//! for that see `librustc_parse::lexer`, which converts this basic token stream
511//! into wide tokens used by the actual parser.
@@ -719,6 +725,9 @@ impl Cursor<'_> {
719725
720726 // Check that the number of closing '#' symbols
721727 // is equal to the number of opening ones.
728+ // Note that this will not consume extra trailing `#` characters:
729+ // `r###"abcde"####` is lexed as a `LexedRawString { n_hashes: 3 }`
730+ // followed by a `#` token.
722731 let mut hashes_left = n_start_hashes;
723732 let is_closing_hash = |c| {
724733 if c == '#' && hashes_left != 0 {
@@ -739,8 +748,8 @@ impl Cursor<'_> {
739748 possible_terminator_offset : None ,
740749 } ;
741750 } else if n_end_hashes > max_hashes {
742- // Keep track of possible terminators to give a hint about where there might be
743- // a missing terminator
751+ // Keep track of possible terminators to give a hint about
752+ // where there might be a missing terminator
744753 possible_terminator_offset =
745754 Some ( self . len_consumed ( ) - start_pos - n_end_hashes + prefix_len) ;
746755 max_hashes = n_end_hashes;
0 commit comments