# titanium_lexer.py (forked from kensybernadeau/PLP_Titanium)
import ply.lex as lex
from ply.lex import TOKEN
import re
# Reserved words, grouped by the kind of function token they produce
reserved = {
    'FUN_NO_PARAM': ['evaluate', 'BufferOverFlow', 'open', 'show'],
    'FUN_WITHOUT_OPERATION': ['process'],
    'FUN_SINGLE_PARAM': ['open'],
}
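# Note: 'open' appears in both FUN_NO_PARAM and FUN_SINGLE_PARAM. Because the
# t_FUN_NO_PARAM rule is defined first below, 'open' will always be tokenized
# as FUN_NO_PARAM; the FUN_SINGLE_PARAM reading could only be recovered later,
# in the parser.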
# All lexers must provide a list named `tokens` that defines every token name
# the lexer can produce. This list is always required and is used to perform a
# variety of validation checks; yacc.py also uses it to identify terminals.
# When building the master regular expression, rules are added in the following order:
# 1. All tokens defined by functions are added in the same order as they appear in the lexer file.
# 2. Tokens defined by strings are added next, sorted in order of decreasing regular-expression
#    length (longer expressions are added first).
# For example, to have separate tokens for "=" and "==", "==" must be checked first.
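# A minimal sketch of that ordering rule (hypothetical token, not part of this
# lexer): if "==" were needed as a function rule, it would have to be defined
# before any rule that can match "=", e.g.
#
#     def t_EQEQ(t):
#         r'=='
#         return t
#
# String rules such as t_EQUALS below need no such care, because PLY sorts
# them by decreasing pattern length automatically.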
# tokens
tokens = [
    'INT',
    'EQUALS', 'ID', 'DOT',
    'COMMA', 'LP', 'RP', 'STRING', 'CHAR',
    'LKEY', 'RKEY', 'SEMICOLON',
] + list(reserved)
# Each token is specified by writing a regular expression rule compatible with Python's re module.
# Each of these rules is defined by making a declaration with the special prefix t_ to indicate that it defines a token.
# Declaration of basic regular expressions
t_EQUALS = r'='
t_DOT = r'\.'
t_COMMA = r','
t_SEMICOLON = r';'
t_LP = r'\('
t_RP = r'\)'
# CHAR, LKEY and RKEY are declared in `tokens` but had no rules, so character
# literals, '{' and '}' were unmatchable. The patterns below are assumptions
# inferred from the token names (LKEY/RKEY read as left/right brace).
t_CHAR = r"'[^']'"
t_LKEY = r'\{'
t_RKEY = r'\}'
# Regex patterns for the reserved-word tokens. The \b word boundaries (an
# added fix) keep a keyword from matching inside a longer identifier such as
# 'opened'.
reg_fun_no_param = re.compile(r'\b(?:' + '|'.join(reserved['FUN_NO_PARAM']) + r')\b')
reg_fun_single_param = re.compile(r'\b(?:' + '|'.join(reserved['FUN_SINGLE_PARAM']) + r')\b')
reg_fun_without_operation = re.compile(r'\b(?:' + '|'.join(reserved['FUN_WITHOUT_OPERATION']) + r')\b')
# Function rules enter the master regular expression in file order, so these
# must stay above t_ID; otherwise every keyword would lex as an ID.
@TOKEN(reg_fun_no_param.pattern)
def t_FUN_NO_PARAM(t):
    return t

@TOKEN(reg_fun_single_param.pattern)
def t_FUN_SINGLE_PARAM(t):
    return t

@TOKEN(reg_fun_without_operation.pattern)
def t_FUN_WITHOUT_OPERATION(t):
    return t
# Generic regex rules
def t_INT(t):
    r'-?\d+'
    try:
        t.value = int(t.value)
    except ValueError:
        print("Integer value too large: %s" % t.value)
        t.value = 0
    return t
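# Note: because the INT pattern consumes a leading '-', input such as '3-4'
# lexes as INT(3) followed by INT(-4); grammars that need binary minus usually
# leave the sign to the parser instead.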
def t_STRING(t):
    r'\"(.*?)\"'  # .*? (was .+?) so the empty string "" also matches
    return t

def t_ID(t):
    r'[a-zA-Z_][a-zA-Z_0-9]*'
    return t  # t.type is already 'ID' for a rule named t_ID
# Ignored characters (spaces and tabs)
t_ignore = " \t"

def t_newline(t):
    r'\n+'
    t.lexer.lineno += t.value.count("\n")

def t_error(t):
    print("Illegal character '%s'" % t.value[0])
    t.lexer.skip(1)
# Build the lexer. Passing reflags replaces PLY's default of re.VERBOSE, so
# re.VERBOSE is OR'ed back in alongside re.UNICODE.
lexer = lex.lex(reflags=re.UNICODE | re.VERBOSE)
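# A minimal smoke test, assuming the module may be run directly; the sample
# program below is illustrative, not taken from the original grammar.
if __name__ == '__main__':
    data = 'open("file.txt"); x = 42; show(x);'
    lexer.input(data)
    for tok in lexer:
        print(tok.type, tok.value, tok.lineno)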