from lexer import Lexer
|
|
from lexer_token import Token, TokenType
|
|
from typing import Callable
|
|
from enum import Enum, auto
|
|
|
|
class PrecedenceType(Enum):
    """Operator-precedence (binding-power) levels for the Pratt parser.

    Members are ordered from weakest to strongest binding; comparisons
    between levels are done through their integer ``value``.
    """

    P_LOWEST = 0
    P_EQUALS = 1
    P_LESSGREATER = 2
    P_SUM = 3
    P_PRODUCT = 4
    P_EXPONENT = 5
    P_PREFIX = 6
    P_CALL = 7
    P_INDEX = 8
|
|
|
|
# Precedence table for infix operator tokens, grouped by binding level.
PRECEDENCES: dict[TokenType, PrecedenceType] = {
    token_type: level
    for level, token_types in (
        (PrecedenceType.P_SUM, (TokenType.PLUS, TokenType.MINUS)),
        (PrecedenceType.P_PRODUCT, (TokenType.ASTERISK, TokenType.SLASH, TokenType.MODULUS)),
        (PrecedenceType.P_EXPONENT, (TokenType.POW,)),
    )
    for token_type in token_types
}
|
|
|
|
class Parser:
    """Pratt-parser front end.

    Owns the two-token lookahead window over a ``Lexer`` token stream and
    the registries of prefix/infix parse functions. Errors are collected
    in ``self.errors`` rather than raised.
    """

    def __init__(self, lexer: Lexer) -> None:
        self.lexer: Lexer = lexer

        # Human-readable messages accumulated while parsing.
        self.errors: list[str] = []

        # Both are None until the two priming __next_token() calls below.
        self.current_token: Token | None = None
        self.peek_token: Token | None = None

        # Registries are keyed by token *type*, consistent with the
        # PRECEDENCES table (the original `dict[Token, ...]` annotation
        # was inaccurate).
        self.prefix_parse_functions: dict[TokenType, Callable] = {}  # e.g. -1
        self.infix_parse_functions: dict[TokenType, Callable] = {}  # e.g. 5 + 5

        # Prime the window so current_token and peek_token are populated.
        self.__next_token()
        self.__next_token()

    # region Parser helpers
    def __next_token(self) -> None:
        """Advance the window: current <- peek, peek <- next lexer token."""
        self.current_token = self.peek_token
        self.peek_token = self.lexer.next_token()

    def __peek_token_is(self, tt: TokenType) -> bool:
        """Return True if the upcoming token has type ``tt``."""
        return self.peek_token.type == tt

    def __expect_peek(self, tt: TokenType) -> bool:
        """Consume the next token if it has type ``tt``.

        On mismatch, records a peek error and does not advance.
        Returns whether the expectation was met.
        """
        if self.__peek_token_is(tt):
            self.__next_token()
            return True
        else:
            self.__peek_error(tt)
            return False

    def __peek_error(self, tt: TokenType) -> None:
        """Record an error for an unexpected upcoming token type."""
        self.errors.append(f"Expected next token to be {tt}, got {self.peek_token.type} instead.")

    def __no_prefix_parse_function_error(self, tt: TokenType) -> None:
        """Record that no prefix parse function is registered for ``tt``."""
        self.errors.append(f"No Prefix Parse Function for {tt} found.")
    # endregion