@@ -1,12 +1,12 @@
-from typing import Union
+from typing import Union, cast
 
-from ..language import Lexer, Source, TokenKind
+from ..language import Lexer, TokenKind
+from ..language.source import Source, is_source
 from ..language.block_string import (
     dedent_block_string_value,
     get_block_string_indentation,
 )
 from ..language.lexer import is_punctuator_token_kind
-from ..pyutils import inspect
 
 
 def strip_ignored_characters(source: Union[str, Source]) -> str:
@@ -65,14 +65,10 @@ def strip_ignored_characters(source: Union[str, Source]) -> str:
 
         """Type description""" type Foo{"""Field description""" bar:String}
     '''
-    source_obj = Source(source) if isinstance(source, str) else source
-    if not isinstance(source_obj, Source):
-        raise TypeError(
-            f"Must provide string or Source. Received: {inspect(source_obj)}."
-        )
-
-    body = source_obj.body
-    lexer = Lexer(source_obj)
+    source = cast(Source, source) if is_source(source) else Source(cast(str, source))
+
+    body = source.body
+    lexer = Lexer(source)
     stripped_body = ""
     was_last_added_token_non_punctuator = False
     while lexer.advance().kind != TokenKind.EOF:
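For reference, here is a rough usage sketch of the function touched by this diff. It is not part of the change itself; it only illustrates that, after swapping the isinstance()/TypeError check for is_source() with cast(), both a plain string and a Source instance are still accepted. The graphql.utilities and graphql.language import paths are assumed from graphql-core 3.x and do not appear in the diff:

    # Usage sketch (assumption: graphql-core 3.x public import paths for the
    # function and the Source class shown in the diff).
    from graphql.language import Source
    from graphql.utilities import strip_ignored_characters

    sdl = '''
    """Type description"""
    type Foo {
        """Field description"""
        bar: String
    }
    '''

    # Both call styles are normalized internally via is_source()/cast():
    print(strip_ignored_characters(sdl))          # plain string input
    print(strip_ignored_characters(Source(sdl)))  # Source instance input
    # Each should print (per the docstring example quoted in the diff):
    # """Type description""" type Foo{"""Field description""" bar:String}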