@@ -557,7 +557,7 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
             if is_ident_start(tokenizer) { IDHash(consume_name(tokenizer)) }
             else if !tokenizer.is_eof() && match tokenizer.next_byte_unchecked() {
                 // Any other valid case here already resulted in IDHash.
-                b'0'...b'9' | b'-' => true,
+                b'0'..=b'9' | b'-' => true,
                 _ => false,
             } { Hash(consume_name(tokenizer)) }
             else { Delim('#') }
@@ -576,11 +576,11 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
         b'+' => {
             if (
                 tokenizer.has_at_least(1)
-                && matches!(tokenizer.byte_at(1), b'0'...b'9')
+                && matches!(tokenizer.byte_at(1), b'0'..=b'9')
             ) || (
                 tokenizer.has_at_least(2)
                 && tokenizer.byte_at(1) == b'.'
-                && matches!(tokenizer.byte_at(2), b'0'...b'9')
+                && matches!(tokenizer.byte_at(2), b'0'..=b'9')
             ) {
                 consume_numeric(tokenizer)
             } else {
@@ -592,11 +592,11 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
         b'-' => {
             if (
                 tokenizer.has_at_least(1)
-                && matches!(tokenizer.byte_at(1), b'0'...b'9')
+                && matches!(tokenizer.byte_at(1), b'0'..=b'9')
             ) || (
                 tokenizer.has_at_least(2)
                 && tokenizer.byte_at(1) == b'.'
-                && matches!(tokenizer.byte_at(2), b'0'...b'9')
+                && matches!(tokenizer.byte_at(2), b'0'..=b'9')
             ) {
                 consume_numeric(tokenizer)
             } else if tokenizer.starts_with(b"-->") {
@@ -611,7 +611,7 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
         },
         b'.' => {
             if tokenizer.has_at_least(1)
-                && matches!(tokenizer.byte_at(1), b'0'...b'9'
+                && matches!(tokenizer.byte_at(1), b'0'..=b'9'
             ) {
                 consume_numeric(tokenizer)
             } else {
@@ -627,7 +627,7 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
                 Delim('/')
             }
         }
-        b'0'...b'9' => { consume_numeric(tokenizer) },
+        b'0'..=b'9' => { consume_numeric(tokenizer) },
         b':' => { tokenizer.advance(1); Colon },
         b';' => { tokenizer.advance(1); Semicolon },
         b'<' => {
@@ -644,7 +644,7 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
             if is_ident_start(tokenizer) { AtKeyword(consume_name(tokenizer)) }
             else { Delim('@') }
         },
-        b'a'...b'z' | b'A'...b'Z' | b'_' | b'\0' => { consume_ident_like(tokenizer) },
+        b'a'..=b'z' | b'A'..=b'Z' | b'_' | b'\0' => { consume_ident_like(tokenizer) },
         b'[' => { tokenizer.advance(1); SquareBracketBlock },
         b'\\' => {
             if !tokenizer.has_newline_at(1) { consume_ident_like(tokenizer) }
@@ -745,8 +745,8 @@ fn consume_comment<'a>(tokenizer: &mut Tokenizer<'a>) -> &'a str {
             b'\n' | b'\x0C' | b'\r' => {
                 tokenizer.consume_newline();
             }
-            b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); }
-            b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); }
+            b'\x80'..=b'\xBF' => { tokenizer.consume_continuation_byte(); }
+            b'\xF0'..=b'\xFF' => { tokenizer.consume_4byte_intro(); }
             _ => {
                 // ASCII or other leading byte.
                 tokenizer.advance(1);
@@ -807,8 +807,8 @@ fn consume_quoted_string<'a>(
             b'\n' | b'\r' | b'\x0C' => {
                 return Err(tokenizer.slice_from(start_pos).into())
             },
-            b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); }
-            b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); }
+            b'\x80'..=b'\xBF' => { tokenizer.consume_continuation_byte(); }
+            b'\xF0'..=b'\xFF' => { tokenizer.consume_4byte_intro(); }
             _ => {
                 // ASCII or other leading byte.
                 tokenizer.advance(1);
@@ -859,8 +859,8 @@ fn consume_quoted_string<'a>(
                 string_bytes.extend("\u{FFFD}".as_bytes());
                 continue;
             }
-            b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); }
-            b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); }
+            b'\x80'..=b'\xBF' => { tokenizer.consume_continuation_byte(); }
+            b'\xF0'..=b'\xFF' => { tokenizer.consume_4byte_intro(); }
             _ => {
                 // ASCII or other leading byte.
                 tokenizer.advance(1);
@@ -882,10 +882,10 @@ fn consume_quoted_string<'a>(
 fn is_ident_start(tokenizer: &mut Tokenizer) -> bool {
     !tokenizer.is_eof()
         && match_byte! { tokenizer.next_byte_unchecked(),
-            b'a'...b'z' | b'A'...b'Z' | b'_' | b'\0' => { true },
+            b'a'..=b'z' | b'A'..=b'Z' | b'_' | b'\0' => { true },
             b'-' => {
                 tokenizer.has_at_least(1) && match_byte! { tokenizer.byte_at(1),
-                    b'a'...b'z' | b'A'...b'Z' | b'-' | b'_' | b'\0' => {
+                    b'a'..=b'z' | b'A'..=b'Z' | b'-' | b'_' | b'\0' => {
                         true
                     }
                     b'\\' => { !tokenizer.has_newline_at(1) }
@@ -921,7 +921,7 @@ fn consume_name<'a>(tokenizer: &mut Tokenizer<'a>) -> CowRcStr<'a> {
             return tokenizer.slice_from(start_pos).into();
         }
         match_byte! { tokenizer.next_byte_unchecked(),
-            b'a'...b'z' | b'A'...b'Z' | b'0'...b'9' | b'_' | b'-' => { tokenizer.advance(1) },
+            b'a'..=b'z' | b'A'..=b'Z' | b'0'..=b'9' | b'_' | b'-' => { tokenizer.advance(1) },
             b'\\' | b'\0' => {
                 // * The tokenizer’s input is UTF-8 since it’s `&str`.
                 // * start_pos is at a code point boundary
@@ -931,10 +931,10 @@ fn consume_name<'a>(tokenizer: &mut Tokenizer<'a>) -> CowRcStr<'a> {
                 value_bytes = tokenizer.slice_from(start_pos).as_bytes().to_owned();
                 break
             }
-            b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); }
-            b'\xC0'...b'\xEF' => { tokenizer.advance(1); }
-            b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); }
-            b => {
+            b'\x80'..=b'\xBF' => { tokenizer.consume_continuation_byte(); }
+            b'\xC0'..=b'\xEF' => { tokenizer.advance(1); }
+            b'\xF0'..=b'\xFF' => { tokenizer.consume_4byte_intro(); }
+            _b => {
                 return tokenizer.slice_from(start_pos).into();
             }
         }
@@ -943,7 +943,7 @@ fn consume_name<'a>(tokenizer: &mut Tokenizer<'a>) -> CowRcStr<'a> {
     while !tokenizer.is_eof() {
         let b = tokenizer.next_byte_unchecked();
         match_byte! { b,
-            b'a'...b'z' | b'A'...b'Z' | b'0'...b'9' | b'_' | b'-' => {
+            b'a'..=b'z' | b'A'..=b'Z' | b'0'..=b'9' | b'_' | b'-' => {
                 tokenizer.advance(1);
                 value_bytes.push(b) // ASCII
             }
@@ -957,19 +957,19 @@ fn consume_name<'a>(tokenizer: &mut Tokenizer<'a>) -> CowRcStr<'a> {
                 tokenizer.advance(1);
                 value_bytes.extend("\u{FFFD}".as_bytes());
             },
-            b'\x80'...b'\xBF' => {
+            b'\x80'..=b'\xBF' => {
                 // This byte *is* part of a multi-byte code point,
                 // we’ll end up copying the whole code point before this loop does something else.
                 tokenizer.consume_continuation_byte();
                 value_bytes.push(b)
             }
-            b'\xC0'...b'\xEF' => {
+            b'\xC0'..=b'\xEF' => {
                 // This byte *is* part of a multi-byte code point,
                 // we’ll end up copying the whole code point before this loop does something else.
                 tokenizer.advance(1);
                 value_bytes.push(b)
             }
-            b'\xF0'...b'\xFF' => {
+            b'\xF0'..=b'\xFF' => {
                 tokenizer.consume_4byte_intro();
                 value_bytes.push(b)
             }
@@ -985,9 +985,9 @@ fn consume_name<'a>(tokenizer: &mut Tokenizer<'a>) -> CowRcStr<'a> {
 
 fn byte_to_hex_digit(b: u8) -> Option<u32> {
     Some(match_byte! { b,
-        b'0'...b'9' => { b - b'0' },
-        b'a'...b'f' => { b - b'a' + 10 },
-        b'A'...b'F' => { b - b'A' + 10 },
+        b'0'..=b'9' => { b - b'0' },
+        b'a'..=b'f' => { b - b'a' + 10 },
+        b'A'..=b'F' => { b - b'A' + 10 },
         _ => {
             return None
         }
@@ -1032,7 +1032,7 @@ fn consume_numeric<'a>(tokenizer: &mut Tokenizer<'a>) -> Token<'a> {
     let mut fractional_part: f64 = 0.;
     if tokenizer.has_at_least(1)
         && tokenizer.next_byte_unchecked() == b'.'
-        && matches!(tokenizer.byte_at(1), b'0'...b'9')
+        && matches!(tokenizer.byte_at(1), b'0'..=b'9')
     {
         is_integer = false;
         tokenizer.advance(1); // Consume '.'
@@ -1050,10 +1050,10 @@ fn consume_numeric<'a>(tokenizer: &mut Tokenizer<'a>) -> Token<'a> {
     let mut value = sign * (integral_part + fractional_part);
 
     if tokenizer.has_at_least(1) && matches!(tokenizer.next_byte_unchecked(), b'e' | b'E') {
-        if matches!(tokenizer.byte_at(1), b'0'...b'9')
+        if matches!(tokenizer.byte_at(1), b'0'..=b'9')
             || (tokenizer.has_at_least(2)
                 && matches!(tokenizer.byte_at(1), b'+' | b'-')
-                && matches!(tokenizer.byte_at(2), b'0'...b'9'))
+                && matches!(tokenizer.byte_at(2), b'0'..=b'9'))
         {
             is_integer = false;
             tokenizer.advance(1);
@@ -1202,7 +1202,7 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>,
                 tokenizer.advance(1);
                 return UnquotedUrl(value.into())
             }
-            b'\x01'...b'\x08' | b'\x0B' | b'\x0E'...b'\x1F' | b'\x7F' // non-printable
+            b'\x01'..=b'\x08' | b'\x0B' | b'\x0E'..=b'\x1F' | b'\x7F' // non-printable
             | b'"' | b'\'' | b'(' => {
                 tokenizer.advance(1);
                 return consume_bad_url(tokenizer, start_pos)
@@ -1216,8 +1216,8 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>,
                 string_bytes = tokenizer.slice_from(start_pos).as_bytes().to_owned();
                 break
             }
-            b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); }
-            b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); }
+            b'\x80'..=b'\xBF' => { tokenizer.consume_continuation_byte(); }
+            b'\xF0'..=b'\xFF' => { tokenizer.consume_4byte_intro(); }
             _ => {
                 // ASCII or other leading byte.
                 tokenizer.advance(1);
@@ -1236,7 +1236,7 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>,
                 tokenizer.advance(1);
                 break;
             }
-            b'\x01'...b'\x08' | b'\x0B' | b'\x0E'...b'\x1F' | b'\x7F' // non-printable
+            b'\x01'..=b'\x08' | b'\x0B' | b'\x0E'..=b'\x1F' | b'\x7F' // non-printable
             | b'"' | b'\'' | b'(' => {
                 tokenizer.advance(1);
                 return consume_bad_url(tokenizer, start_pos);
@@ -1254,13 +1254,13 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>,
                 tokenizer.advance(1);
                 string_bytes.extend("\u{FFFD}".as_bytes());
             }
-            b'\x80'...b'\xBF' => {
+            b'\x80'..=b'\xBF' => {
                 // We’ll end up copying the whole code point
                 // before this loop does something else.
                 tokenizer.consume_continuation_byte();
                 string_bytes.push(b);
             }
-            b'\xF0'...b'\xFF' => {
+            b'\xF0'..=b'\xFF' => {
                 // We’ll end up copying the whole code point
                 // before this loop does something else.
                 tokenizer.consume_4byte_intro();
@@ -1367,7 +1367,7 @@ fn consume_escape(tokenizer: &mut Tokenizer) -> char {
         return '\u{FFFD}';
     } // Escaped EOF
     match_byte! { tokenizer.next_byte_unchecked(),
-        b'0'...b'9' | b'A'...b'F' | b'a'...b'f' => {
+        b'0'..=b'9' | b'A'..=b'F' | b'a'..=b'f' => {
             let (c, _) = consume_hex_digits(tokenizer);
             if !tokenizer.is_eof() {
                 match_byte! { tokenizer.next_byte_unchecked(),
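The change throughout this diff is purely syntactic: Rust's inclusive range pattern `a ... b` was deprecated in favor of `a ..= b`, and both spellings match the same closed range. Below is a minimal standalone sketch of the pattern being migrated; `is_hex_digit` is a hypothetical helper written for illustration (it loosely mirrors `byte_to_hex_digit` above), not code from the cssparser source.

```rust
// Hypothetical example, not from cssparser: an inclusive byte-range
// pattern written with `..=`, the supported replacement for `...`.
fn is_hex_digit(b: u8) -> bool {
    match b {
        // Previously spelled `b'0' ... b'9' | b'a' ... b'f' | b'A' ... b'F'`;
        // `..=` matches the identical inclusive ranges.
        b'0'..=b'9' | b'a'..=b'f' | b'A'..=b'F' => true,
        _ => false,
    }
}

fn main() {
    assert!(is_hex_digit(b'C'));
    assert!(!is_hex_digit(b'g'));
}
```

The diff also renames one binding from `b` to `_b` (in `consume_name`): that arm never reads the byte it matched, and the leading underscore silences the unused-variable lint without changing behavior.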