push
itamar itbaf@proton.me
Thu, 12 Mar 2026 15:37:41 +0200
6 files changed,
455 insertions(+),
0 deletions(-)
A
.gitignore
@@ -0,0 +1,160 @@
+# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. 
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/
A
intepreter.py
@@ -0,0 +1,50 @@
+from typing import Any + +from structs import Program, ASTNodeTypes, BinaryExpression, TokenTypes, AST, UnaryExpression + + +class Interpreter: + _ast: Program + + def __init__(self, ast): + self._ast = ast + + def run(self): + for expr in self._ast.body: + print(self._run_expr(expr)) + + def _run_expr(self, expr: AST): + match expr.type: + case ASTNodeTypes.Literal: + return expr.value + case ASTNodeTypes.BinaryExpression: + return self._run_binary(expr) + case ASTNodeTypes.UnaryExpression: + return self._run_unary(expr) + case other: + raise Exception('Unknown type of expression.') + + def _run_binary(self, expr: BinaryExpression) -> Any: + left = self._run_expr(expr.left) + right = self._run_expr(expr.right) + match expr.operator.type: + case TokenTypes.PLUS: + return left + right + case TokenTypes.MINUS: + return left - right + case TokenTypes.MULT: + return left * right + case TokenTypes.DIV: + return left / right + case other: + raise Exception('Operator not supported') + + def _run_unary(self, expr: UnaryExpression) -> Any: + value = self._run_expr(expr.value) + match expr.operator.type: + case TokenTypes.PLUS: + return value + case TokenTypes.MINUS: + return -value + case other: + raise Exception('Operator not supported')
A
lexer.py
@@ -0,0 +1,69 @@
from typing import List

from structs import TokenTypes, Token


# Single-character operators/delimiters mapped to their token types.
KEYS_TO_TOKENS = {
    '+': TokenTypes.PLUS,
    '-': TokenTypes.MINUS,
    '*': TokenTypes.MULT,
    '/': TokenTypes.DIV,
    '(': TokenTypes.OPEN_PAREN,
    ')': TokenTypes.CLOSE_PAREN
}


class Lexer:
    """Turns a source string into a flat list of Tokens."""

    _offset: int  # index of the next unread character
    _string: str  # the full source text being lexed
    _line: int    # 1-based line number, for error reporting

    @property
    def _current(self) -> str:
        return self._string[self._offset]

    @property
    def _previous(self) -> str:
        return self._string[self._offset - 1]

    def __init__(self, string: str):
        self._string = string
        # Initialise state per instance (not as class attributes) so one
        # Lexer can never observe another's position.
        self._offset = 0
        self._line = 1

    def _lex_number(self) -> Token:
        """Consume a run of digits and dots, returning an INT or FLOAT token.

        Raises ValueError (via int()/float()) for malformed runs such as
        "." or "1.2.3".
        """
        number_str = ''
        did_dot_appear = False

        # Bounds check first: a number that terminates the input (no
        # trailing newline) must not raise IndexError reading _current.
        while self._offset < len(self._string) and (
                self._current.isdigit() or self._current == '.'):
            if self._current == '.':
                did_dot_appear = True
            number_str += self._current
            self._offset += 1

        if did_dot_appear:
            return Token(TokenTypes.FLOAT, float(number_str), self._line)
        return Token(TokenTypes.INT, int(number_str), self._line)

    def next(self) -> Token:
        """Return the next token, or an EOF token at end of input.

        Raises:
            Exception: on a character that starts no known token.
        """
        if self._offset >= len(self._string):
            return Token(TokenTypes.EOF, None, self._line)

        if self._current == '\n':
            self._line += 1
            self._offset += 1
            return self.next()
        elif self._current.isspace():
            self._offset += 1
            return self.next()
        elif self._current.isdigit() or self._current == '.':
            return self._lex_number()
        elif self._current in KEYS_TO_TOKENS:
            self._offset += 1
            # _previous is the character just consumed above.
            return Token(KEYS_TO_TOKENS[self._previous], self._previous, self._line)
        raise Exception(f'Invalid token "{self._current}".')

    def lex(self) -> List[Token]:
        """Lex the whole input, returning all tokens up to (excluding) EOF."""
        tokens = []
        while (token := self.next()).type != TokenTypes.EOF:
            tokens.append(token)
        return tokens
A
main.py
@@ -0,0 +1,28 @@
import sys

from intepreter import Interpreter
from lexer import Lexer
from parser import Parser


def main():
    """Run a simple REPL: for each stdin line, lex, parse, and evaluate.

    Results are printed by the interpreter itself; the "->" prompt is
    flushed immediately so it appears before input is read.
    """
    # print() supports flushing directly; no need for sys.stdout.flush().
    print('->', end=' ', flush=True)
    for line in sys.stdin:
        tokens = Lexer(line).lex()
        ast = Parser(tokens).parse()
        Interpreter(ast).run()
        print('->', end=' ', flush=True)


if __name__ == '__main__':
    main()
A
parser.py
@@ -0,0 +1,71 @@
from typing import List

from structs import (Token, AST, Program, ExpressionStatement, UnaryExpression, BinaryExpression, Literal, ASTNodeTypes,
                     TokenTypes)


class Parser:
    """Recursive-descent parser for arithmetic expressions.

    Grammar (lowest to highest precedence):
        plus_minus : mult_div (("+" | "-") mult_div)*
        mult_div   : unary (("*" | "/") unary)*
        unary      : ("+" | "-") unary | literal
        literal    : INT | FLOAT | "(" plus_minus ")"
    """

    _tokens: List[Token]
    _index: int = 0  # position of the next unconsumed token

    @property
    def _current(self) -> Token:
        return self._tokens[self._index]

    @property
    def _previous(self) -> Token:
        # The token most recently consumed by _match().
        return self._tokens[self._index - 1]

    def __init__(self, tokens: List[Token]):
        self._tokens = tokens

    def parse(self) -> Program:
        """Parse every expression in the token stream into a Program node."""
        ast: Program = Program(ASTNodeTypes.Program, [])

        while self._index < len(self._tokens):
            ast.body.append(self._plus_minus())

        return ast

    def _match(self, *args: TokenTypes) -> bool:
        """Consume the current token if its type is one of *args."""
        if value := (self._index < len(self._tokens) and self._current.type in args):
            self._index += 1
        return value

    def _plus_minus(self) -> AST:
        expr = self._mult_div()

        while self._match(TokenTypes.PLUS, TokenTypes.MINUS):
            operator = self._previous
            # Right operand comes from the NEXT-higher precedence rule, not
            # from recursing into this rule: recursing made "+"/"-"
            # right-associative, so "1 - 2 - 3" evaluated as 1 - (2 - 3).
            # Looping here yields the correct left-associative tree.
            right = self._mult_div()
            expr = BinaryExpression(expr, right, operator)

        return expr

    def _mult_div(self) -> AST:
        expr = self._unary()

        while self._match(TokenTypes.MULT, TokenTypes.DIV):
            operator = self._previous
            # Same left-associativity fix as in _plus_minus: "8 / 4 / 2"
            # must parse as (8 / 4) / 2.
            right = self._unary()
            expr = BinaryExpression(expr, right, operator)

        return expr

    def _unary(self) -> AST:
        if self._match(TokenTypes.PLUS, TokenTypes.MINUS):
            operator = self._previous
            value = self._unary()
            return UnaryExpression(operator, value)

        return self._literal()

    def _literal(self) -> AST:
        if self._match(TokenTypes.INT, TokenTypes.FLOAT):
            return Literal(self._previous.value)

        if self._match(TokenTypes.OPEN_PAREN):
            expr = self._plus_minus()
            if not self._match(TokenTypes.CLOSE_PAREN):
                raise Exception(f'Expected closing parenthesis ")" at line {self._previous.line}.')
            return expr

        # Previously this fell through and returned None, which crashed
        # later inside the interpreter; fail fast with a parse error.
        if self._index < len(self._tokens):
            raise Exception(f'Unexpected token at line {self._current.line}.')
        raise Exception('Unexpected end of input.')
A
structs.py
@@ -0,0 +1,77 @@
from dataclasses import dataclass
from typing import Any, List
from enum import auto, IntEnum


class TokenTypes(IntEnum):
    """Kinds of lexical tokens produced by the lexer."""
    INT = auto()
    FLOAT = auto()
    PLUS = auto()
    MINUS = auto()
    MULT = auto()
    DIV = auto()
    OPEN_PAREN = auto()
    CLOSE_PAREN = auto()
    EOF = auto()


@dataclass()
class Token:
    """A single lexed token: its kind, raw/converted value, and source line."""
    type: TokenTypes
    value: Any
    line: int


class ASTNodeTypes(IntEnum):
    """Discriminator values stored in AST.type for runtime dispatch."""
    Program = auto()
    ExpressionStatement = auto()
    BinaryExpression = auto()
    UnaryExpression = auto()
    Literal = auto()


@dataclass()
class AST:
    """Base AST node. `type` discriminates the concrete node kind."""
    type: ASTNodeTypes


@dataclass()
class Program(AST):
    """Root node: a sequence of top-level expressions."""
    body: List[AST]


@dataclass()
class ExpressionStatement(AST):
    # NOTE(review): not constructed anywhere in this patch — presumably
    # reserved for future statement support; confirm before removing.
    expression: AST


@dataclass()
class Literal(AST):
    """A literal int/float value."""
    value: Any

    # The hand-written __init__ takes precedence over the one @dataclass
    # would generate (dataclass does not overwrite names defined in the
    # class body); it exists so callers need not pass `type` explicitly.
    def __init__(self, value: Any):
        self.type = ASTNodeTypes.Literal
        self.value = value


@dataclass()
class BinaryExpression(AST):
    """An infix operation: left <operator> right."""
    left: Literal
    right: Literal
    operator: Token

    # Manual __init__ for the same reason as Literal: fixes `type` itself.
    def __init__(self, left: Literal, right: Literal, operator: Token):
        self.type = ASTNodeTypes.BinaryExpression
        self.left = left
        self.right = right
        self.operator = operator


@dataclass()
class UnaryExpression(Literal):
    """A prefix operation: <operator> value (value inherited from Literal)."""
    operator: Token

    # CAUTION: parameter order here (operator, value) differs from the
    # dataclass field order (value, operator); callers rely on this
    # signature, so it must not be replaced by a generated __init__.
    def __init__(self, operator: Token, value: Any):
        self.type = ASTNodeTypes.UnaryExpression
        self.operator = operator
        self.value = value