Get path of preprocessor include.
Viatorus committed Nov 29, 2018
1 parent 93cace7; commit 4f9ab5f
Showing 4 changed files with 19 additions and 7 deletions.
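Taken together, the change attaches a `Span` over the path characters to each `PreprocessorIncludeToken`, so consumers can read the include target straight off the token. A minimal usage sketch, assuming `tokenize` and the token class are importable from `quom.tokenizer` (the diff itself does not show the package's public exports); the `tokens[1]` indexing mirrors the assertions in tests/test_tokenizer.py below:

```python
# Sketch only: the import path is an assumption based on the file layout.
from quom.tokenizer import PreprocessorIncludeToken, tokenize

tokens = tokenize('#include "foo/bar.hpp" ')
include = tokens[1]
assert isinstance(include, PreprocessorIncludeToken)

# New in this commit: a Span covering just the path characters.
print(str(include.path))  # -> foo/bar.hpp
```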
quom/tokenizer/iterator.py (6 changes: 3 additions & 3 deletions)

```diff
@@ -1,8 +1,5 @@
 import copy
 from typing import Union
 
-from .tokenize_error import TokenizeError
-
-
 class Iterator:
     pass
@@ -31,6 +28,9 @@ def __next__(self):
         self.it.next()
         return tmp
 
+    def __str__(self):
+        return ''.join(self)
+
 
 class Iterable:
     def __init__(self, src):
```
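The new `__str__` leans on the fact that these iterators yield one source character per step, so joining the iteration reproduces the underlying text. A simplified stand-in for illustration (the real class also handles line wrapping, which this sketch ignores):

```python
class CharSpan:
    """Toy model of a span/iterator over source characters."""

    def __init__(self, text: str, first: int, last: int):
        self.text = text    # the underlying source buffer
        self.first = first  # index of the first covered character
        self.last = last    # one past the last covered character

    def __iter__(self):
        # Yield the covered characters one at a time, as the tokenizer's
        # iterator does when advanced with next().
        return iter(self.text[self.first:self.last])

    def __str__(self):
        # The same trick as the commit: join the iteration.
        return ''.join(self)


assert str(CharSpan('#include "abc"', 10, 13)) == 'abc'
```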
quom/tokenizer/preprocessor_tokenizer.py (15 changes: 12 additions & 3 deletions)

```diff
@@ -1,7 +1,7 @@
 from typing import List
 
 from .comment_tokenizer import scan_for_comment
-from .iterator import LineWrapIterator
+from .iterator import LineWrapIterator, Span
 from .number_tokenizer import scan_for_number
 from .quote_tokenizer import scan_for_quote
 from .remaining_tokenizer import scan_for_remaining
@@ -17,8 +17,9 @@ def __init__(self, start, end, tokens: List[Token]):
 
 
 class PreprocessorIncludeToken(PreprocessorToken):
-    def __init__(self, start, end, tokens: List[Token]):
+    def __init__(self, start, end, tokens: List[Token], path_start, path_end):
         super().__init__(start, end, tokens)
+        self.path = Span(path_start, path_end)
 
 
 class PreprocessorPragmaToken(PreprocessorToken):
@@ -71,6 +72,9 @@ def scan_for_preprocessor_include(start: LineWrapIterator, it: LineWrapIterator, tokens: List[Token]):
     it = LineWrapIterator(it)
 
     if it.curr == '"':
+        it.next()
+        path_start = it.copy()
+
         # Parse until non escaped ".
         backslashes = 0
         while it.next() and it.curr != '\n' and (it.curr != '"' or backslashes % 2 != 0):
@@ -82,20 +86,25 @@ def scan_for_preprocessor_include(start: LineWrapIterator, it: LineWrapIterator, tokens: List[Token]):
         # Check if end of line is reached.
         if it.curr != '"':
             raise TokenizeError("Character sequence not terminated!", it)
+        path_end = it.copy()
+        it.next()
 
     elif it.curr == '<':
         it.next()
+        path_start = it.copy()
 
         # Scan until terminating >.
         while it.next() and it.curr != '\n' and it.curr != '>':
             pass
 
         # Check if end of line is reached.
         if it.curr != '>':
             raise TokenizeError("Character sequence not terminated!", it)
+        path_end = it.copy()
+        it.next()
 
     scan_for_line_end(it, tokens)
-    return PreprocessorIncludeToken(start, it, tokens)
+    return PreprocessorIncludeToken(start, it, tokens, path_start, path_end)
 
 
 def scan_for_preprocessor_pragma(start: LineWrapIterator, it: LineWrapIterator, tokens: List[Token]):
```
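The capture pattern in `scan_for_preprocessor_include` is: step past the opening delimiter, copy the iterator as `path_start`, scan to the closing delimiter, and copy it again as `path_end` before consuming the terminator; for quoted paths, a backslash counter tracks escape parity so an escaped `\"` inside the path does not end the scan. A standalone sketch of the same logic over plain string indices (a hypothetical helper, not the project's API):

```python
def scan_include_path(line: str) -> str:
    """Extract the path from '#include "..."' or '#include <...>'.

    Index-based sketch of the iterator logic in this commit.
    """
    i = line.index('"') if '"' in line else line.index('<')
    closer = '"' if line[i] == '"' else '>'
    i += 1                 # step past the opening delimiter
    path_start = i         # like path_start = it.copy()
    backslashes = 0
    while i < len(line) and line[i] != '\n':
        if line[i] == closer and backslashes % 2 == 0:
            break          # unescaped closing delimiter found
        backslashes = backslashes + 1 if line[i] == '\\' else 0
        i += 1
    if i >= len(line) or line[i] != closer:
        raise ValueError('Character sequence not terminated!')
    path_end = i           # like path_end = it.copy()
    return line[path_start:path_end]


assert scan_include_path('#include "abc"') == 'abc'
assert scan_include_path('#include <abc>') == 'abc'
assert scan_include_path('#include "abc\\""') == 'abc\\"'
```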
quom/tokenizer/token.py (2 changes: 1 addition & 1 deletion)

```diff
@@ -6,4 +6,4 @@ def __init__(self, start, end):
         self.span = Span(start, end) if start and end else None
 
     def __str__(self):
-        return ''.join(self.span)
+        return str(self.span)
```
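This one-liner does two things: `Token.__str__` now defers to the span's own `__str__` (presumably the one added in iterator.py above), and it no longer raises when `self.span` is `None`, because `str(None)` yields `'None'` whereas `''.join(None)` raises a `TypeError`. The latter point in plain Python:

```python
span = None  # the `else None` branch of Token.__init__ above

print(str(span))   # -> None (what the new code produces)
try:
    ''.join(span)  # what the old code attempted
except TypeError as exc:
    print(exc)     # join() requires an iterable of strings
```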
tests/test_tokenizer.py (3 changes: 3 additions & 0 deletions)

```diff
@@ -351,12 +351,15 @@ def test_preprocessor():
     tokens = tokenize('#include "abc" ')
     check_tokens(tokens, [PreprocessorToken])
     check_tokens(tokens, [PreprocessorIncludeToken])
+    assert str(tokens[1].path) == 'abc'
 
     tokens = tokenize('#include "abc\\"" ')
     check_tokens(tokens, [PreprocessorToken])
+    assert str(tokens[1].path) == 'abc\\"'
 
     tokens = tokenize('#include /*123*/ <abc> ')
     check_tokens(tokens, [PreprocessorToken])
+    assert str(tokens[1].path) == 'abc'
 
     with pytest.raises(TokenizeError):
         tokenize('#include "abc\\" ')
```
