Diffstat (limited to 'src')
-rw-r--r--  src/Fun.hs           14
-rw-r--r--  src/Fun/Compiler.hs  17
-rw-r--r--  src/Fun/Parser.hs    192
-rw-r--r--  src/Fun/Tree.hs      18
-rw-r--r--  src/context.c        111
-rw-r--r--  src/lint.c           21
-rw-r--r--  src/log.c            24
-rw-r--r--  src/main.c           27
-rw-r--r--  src/preprocess.c     59
-rw-r--r--  src/tokenize.c       234
-rw-r--r--  src/treeify.c        187
11 files changed, 241 insertions(+), 663 deletions(-)
diff --git a/src/Fun.hs b/src/Fun.hs
new file mode 100644
index 0000000..fac4e23
--- /dev/null
+++ b/src/Fun.hs
@@ -0,0 +1,14 @@
+module Fun where
+
+import Fun.Compiler
+import System.Environment
+
+usage :: IO ()
+usage = putStrLn "Usage: fun <path>"
+
+run :: IO ()
+run = do
+ args <- getArgs
+ case args of
+ [path] -> compile path
+ _ -> usage
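
For reference, a minimal sketch of how the new run entry point could be wired up from an executable stub; the app/Main.hs path and the Main module are assumptions, not part of this diff:

-- Hypothetical app/Main.hs (assumed layout, not included in this commit)
module Main where

import Fun (run)

main :: IO ()
main = run
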
diff --git a/src/Fun/Compiler.hs b/src/Fun/Compiler.hs
new file mode 100644
index 0000000..f7e5c85
--- /dev/null
+++ b/src/Fun/Compiler.hs
@@ -0,0 +1,17 @@
+module Fun.Compiler where
+
+import Control.Exception
+import Fun.Parser
+import Fun.Tree
+
+parse :: String -> Block -- TODO: Should be tree
+parse file = case block file of
+ Nothing -> error "Invalid program"
+ Just (a, _) -> a
+
+compile :: String -> IO ()
+compile path = do
+ file <- try $ readFile path
+ case file of
+ Left exception -> print (exception :: IOError)
+ Right contents -> print $ parse contents
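
A rough sketch of how the error path of compile behaves at a GHCi prompt; the file name is made up and the printed IOError is approximate:

ghci> compile "missing.fun"
missing.fun: openFile: does not exist (No such file or directory)
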
diff --git a/src/Fun/Parser.hs b/src/Fun/Parser.hs
new file mode 100644
index 0000000..f3f5201
--- /dev/null
+++ b/src/Fun/Parser.hs
@@ -0,0 +1,192 @@
+module Fun.Parser where
+
+import Data.Char
+import Fun.Tree
+
+type Parser a = String -> Maybe (a, String)
+
+char :: Parser Char
+char [] = Nothing
+char (x : xs) = Just (x, xs)
+
+digit :: Parser Char
+digit = char <=> isDigit
+
+digits :: Parser String
+digits = iter digit
+
+number :: Parser Integer
+number =
+ literal '-'
+ <-+> digits
+ >>> (\n -> -1 * (read n :: Integer))
+ <|> digits
+ >>> (\n -> read n :: Integer)
+
+space :: Parser Char
+space = char <=> isSpace
+
+newline :: Parser Char
+newline = char <=> (== '\n')
+
+notSpace :: Parser Char
+notSpace = char <=> (not . isSpace)
+
+letter :: Parser Char
+letter = char <=> isAlpha
+
+letters :: Parser String
+letters = iter letter
+
+alphanum :: Parser Char
+alphanum = digit <|> letter
+
+literal :: Char -> Parser Char
+literal c = char <=> (== c)
+
+result :: a -> Parser a
+result a cs = Just (a, cs)
+
+iter :: Parser Char -> Parser String
+iter m = (iterS m) <=> (/= "")
+
+iterS :: Parser a -> Parser [a]
+iterS m = m <+> iterS m >>> (\(x, y) -> x : y) <|> result []
+
+token :: Parser a -> Parser a
+token = (<+-> iterS space)
+
+-- A parser that will accept a given alpha string
+acceptWord :: String -> Parser String
+acceptWord w = token (letters <=> (== w))
+
+-- A parser that will accept a given string
+accept :: String -> Parser String
+accept w = token ((iter notSpace) <=> (== w))
+
+-- Given a parser and a predicate, succeed only if the parsed result satisfies the predicate
+infix 7 <=>
+(<=>) :: Parser a -> (a -> Bool) -> Parser a
+(parser <=> predicate) input = case parser input of
+ Nothing -> Nothing
+ Just (a, rest) -> if (predicate a) then Just (a, rest) else Nothing
+
+
+-- Combine two parsers, pairing their results up in a tuple
+infixl 6 <+>
+(<+>) :: Parser a -> Parser b -> Parser (a, b)
+(parserA <+> parserB) input = case parserA input of
+ Nothing -> Nothing
+ Just (resultA, remainder) -> case parserB remainder of
+ Nothing -> Nothing
+ Just (resultB, cs) -> Just ((resultA, resultB), cs)
+
+-- Sequence operator that discards the second result
+infixl 6 <+->
+(<+->) :: Parser a -> Parser b -> Parser a
+(parserA <+-> parserB) input = case parserA input of
+ Nothing -> Nothing
+ Just (resultA, remainder) -> case parserB remainder of
+ Nothing -> Nothing
+ Just (_, cs) -> Just (resultA, cs)
+
+-- Sequence operator that discards the first result
+infixl 6 <-+>
+(<-+>) :: Parser a -> Parser b -> Parser b
+(parserA <-+> parserB) input = case parserA input of
+ Nothing -> Nothing
+ Just (resultA, remainder) -> case parserB remainder of
+ Nothing -> Nothing
+ Just (resultB, cs) -> Just (resultB, cs)
+
+-- Transform a parser's result
+infixl 5 >>>
+(>>>) :: Parser a -> (a -> b) -> Parser b
+(parser >>> transformer) input = case parser input of
+ Nothing -> Nothing
+ Just (resultA, remainder) -> Just ((transformer resultA), remainder)
+
+-- Feed a parser's result into a parser-producing function (monadic bind)
+infix 4 +>
+(+>) :: Parser a -> (a -> Parser b) -> Parser b
+(parser +> function) input = case parser input of
+ Nothing -> Nothing
+ Just (a, cs) -> function a cs
+
+-- Combine two parsers with an 'or': try the first, fall back to the second on failure
+infixl 3 <|>
+(<|>) :: Parser a -> Parser a -> Parser a
+(parserA <|> parserB) input = case parserA input of
+ Nothing -> parserB input
+ result -> result
+
+----
+
+tree :: Parser Tree
+tree = iterS program >>> Tree
+
+program :: Parser Program
+program = iterS block >>> Program
+
+block :: Parser Block
+block = functionBlock <+-> newline >>> Block
+
+visibility :: Parser Char
+visibility = literal '+' <|> literal '-'
+
+functionBlock :: Parser FunctionBlock
+functionBlock =
+ functionDeclaration
+ <+> iterS functionDefinition
+ >>> (\(a, b) -> FunctionBlock a b)
+
+functionDeclaration :: Parser FunctionDeclaration
+functionDeclaration =
+ functionDeclarationWithoutFlags <|> functionDeclarationWithFlags
+
+functionDeclarationWithoutFlags :: Parser FunctionDeclaration
+functionDeclarationWithoutFlags =
+ functionName
+ <+> functionDeclarationDelimiter
+ <+> iterS functionType
+ <+-> newline
+ >>> (\((a, b), c) -> FunctionDeclarationWithoutFlags a b c)
+
+functionDeclarationWithFlags :: Parser FunctionDeclaration
+functionDeclarationWithFlags =
+ functionName
+ <+> functionDeclarationDelimiter
+ <+> functionTypeList
+ <+-> space
+ <+-> literal '%'
+ <+-> space
+ <+> iterS functionFlag
+ <+-> newline
+ >>> (\(((a, b), c), d) -> FunctionDeclarationWithFlags a b c d)
+
+functionDeclarationDelimiter :: Parser Char
+functionDeclarationDelimiter =
+ space <-+> literal ':' <-+> visibility <+-> literal ':' <+-> space
+
+functionName :: Parser String
+functionName = letter <+> iterS alphanum >>> (\(a, b) -> a : b)
+
+functionTypeList :: Parser [String]
+functionTypeList =
+ iterS ((functionType <+-> space <+-> literal ':' <+-> space) <|> functionType)
+
+functionType :: Parser String
+functionType = letter <+> iterS alphanum >>> (\(a, b) -> a : b)
+
+functionFlag :: Parser String
+functionFlag = letters
+
+functionDefinition :: Parser FunctionDefinition
+functionDefinition =
+ letters
+ <+-> space
+ <+-> literal ':'
+ <+-> space
+ <+> letters
+ <+-> newline
+ >>> (\(a, b) -> FunctionDefinition a b)
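
A few hand-checked evaluations of the combinators above, as an illustrative GHCi sketch (the inputs are made up):

ghci> number "-42 rest"
Just (-42," rest")
ghci> token letters "foo   bar"
Just ("foo","bar")
ghci> digit "x1"
Nothing
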
diff --git a/src/Fun/Tree.hs b/src/Fun/Tree.hs
new file mode 100644
index 0000000..a7709ee
--- /dev/null
+++ b/src/Fun/Tree.hs
@@ -0,0 +1,18 @@
+module Fun.Tree where
+
+data Tree = Tree [Program]
+ deriving Show
+
+data Program = Program [Block]
+ deriving Show
+
+data Block = Block FunctionBlock -- | Block DataBlock ...
+ deriving Show
+data FunctionBlock = FunctionBlock FunctionDeclaration [FunctionDefinition]
+ deriving Show
+
+data FunctionDeclaration = FunctionDeclarationWithoutFlags String Char [String] | FunctionDeclarationWithFlags String Char [String] [String]
+ deriving Show
+
+data FunctionDefinition = FunctionDefinition String String
+ deriving Show
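
For orientation, a hand-built value of the new tree types; the field values below are placeholders, not taken from any real Fun program:

-- Placeholder example, assuming only the constructors declared above
exampleBlock :: Block
exampleBlock =
  Block (FunctionBlock
    (FunctionDeclarationWithoutFlags "add" '+' ["Int"])
    [FunctionDefinition "add" "one"])
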
diff --git a/src/context.c b/src/context.c
deleted file mode 100644
index 756c38e..0000000
--- a/src/context.c
+++ /dev/null
@@ -1,111 +0,0 @@
-#include <assert.h>
-#include <math.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include <context.h>
-#include <log.h>
-#include <preprocess.h>
-#include <tokenize.h>
-#include <treeify.h>
-
-struct ctx *context_create(const char *path)
-{
- struct ctx *ctx = calloc(1, sizeof(*ctx));
- ctx->tokens = calloc(TOKENS_MAX, sizeof(*ctx->tokens));
- ctx->location.path = path; // TODO: strdup?
-
- FILE *file = fopen(path, "r");
- assert(file);
-
- // Find size of file
- fseek(file, 0, SEEK_END);
- ctx->location.size = ftell(file);
- rewind(file);
- assert(ctx->location.size);
-
- ctx->location.data = malloc(ctx->location.size + 1);
- assert(ctx->location.data);
- fread(ctx->location.data, 1, ctx->location.size, file);
- fclose(file);
-
- ctx->location.data[ctx->location.size] = 0;
-
- ctx->tree.head = tree_create();
- ctx->tree.current = NULL;
-
- return ctx;
-}
-
-void context_destroy(struct ctx *ctx)
-{
- if (!ctx)
- return;
-
- if (ctx->location.data)
- free(ctx->location.data);
-
- if (ctx->data && ctx->data != ctx->location.data)
- free(ctx->data);
-
- if (ctx->tokens)
- free(ctx->tokens);
-
- if (ctx->tree.head)
- tree_destroy(ctx->tree.head);
-
- free(ctx);
-}
-
-char context_getch(struct ctx *ctx, size_t i)
-{
- if (i >= ctx->size || !ctx->data[i])
- errln(&ctx->location, "Unexpected end of buffer");
- return ctx->data[i];
-}
-
-#define CONTEXT_COUNT 3
-void context_print(FILE *fd, struct ctx_location *location)
-{
- size_t start_line = fmax((int)location->line - CONTEXT_COUNT, 0);
- size_t end_line = location->line + CONTEXT_COUNT + 1;
-
- for (size_t line = 0, index = 0; line < end_line;) {
- if (line < start_line) {
- if (location->data[index] == '\n' || location->data[index] == MACRO_NEWLINE)
- line++;
- index++;
- continue;
- }
-
- const char *end = strchr(location->data + index, '\n') + 1;
- assert(end > location->data);
- size_t length = end - (location->data + index) - 1;
-
- if (location->line == line) {
- int pointer_length = location->column + 9;
- char *pointer = malloc(pointer_length); // Literally a pointer
- fprintf(fd,
- "\x1B[1;32m%6lu | %.*s\x1B[1;31m%c\x1B[1;32m%.*s\n\x1B[1;31m%.*s%s\x1B[0m\n",
- line + 1, (int)location->column, location->data + index,
- *(location->data + index + location->column),
- (int)(length - location->column - 1),
- location->data + index + location->column + 1, pointer_length,
- (char *)memset(pointer, '~', pointer_length), "^ (around here)");
- free(pointer);
- } else {
- fprintf(fd, "%6lu | %.*s\n", line + 1, (int)length, location->data + index);
- }
-
- index = end - location->data;
- line++;
- if (index >= location->size)
- return;
- }
-}
-
-void context_rewind(struct ctx *ctx)
-{
- ctx->location.line = 0;
- ctx->location.column = 0;
-}
diff --git a/src/lint.c b/src/lint.c
deleted file mode 100644
index 7c2e9da..0000000
--- a/src/lint.c
+++ /dev/null
@@ -1,21 +0,0 @@
-#include <stddef.h>
-
-#include <lint.h>
-#include <log.h>
-#include <tokenize.h>
-
-void lint(struct ctx *ctx)
-{
- // Lint parens
- int parens = 0;
- for (size_t i = 1; i < ctx->token_count; i++) {
- struct token *token = &ctx->tokens[i];
- if (token->type == LPAREN)
- parens++;
- else if (token->type == RPAREN)
- parens--;
- }
-
- if (parens != 0)
- errln(&ctx->location, "Invalid parens balance");
-}
diff --git a/src/log.c b/src/log.c
deleted file mode 100644
index 0e59ea2..0000000
--- a/src/log.c
+++ /dev/null
@@ -1,24 +0,0 @@
-#include <assert.h>
-#include <stdarg.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-#include <log.h>
-
-void errln(struct ctx_location *location, const char *fmt, ...)
-{
- fprintf(stderr, "\x1B[1;36m%s:%lu:%lu:\x1B[0m ", location->path, location->line + 1,
- location->column + 1);
-
- fprintf(stderr, "\x1B[1;31mError:\x1B[0m ");
-
- va_list ap;
- va_start(ap, fmt);
- vfprintf(stderr, fmt, ap);
- va_end(ap);
-
- fprintf(stderr, "\n");
- context_print(stderr, location);
-
- exit(1);
-}
diff --git a/src/main.c b/src/main.c
deleted file mode 100644
index 5827913..0000000
--- a/src/main.c
+++ /dev/null
@@ -1,27 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-
-#include <context.h>
-#include <lint.h>
-#include <log.h>
-#include <preprocess.h>
-#include <tokenize.h>
-#include <treeify.h>
-
-int main(int argc, char *argv[])
-{
- if (argc < 2) {
- fprintf(stderr, "Not enough arguments!");
- exit(1);
- }
-
- struct ctx *ctx = context_create(argv[1]);
- preprocess(ctx);
- tokenize(ctx);
- lint(ctx);
- treeify(ctx);
-
- context_destroy(ctx);
-
- return 0;
-}
diff --git a/src/preprocess.c b/src/preprocess.c
deleted file mode 100644
index f4f67f7..0000000
--- a/src/preprocess.c
+++ /dev/null
@@ -1,59 +0,0 @@
-#include <assert.h>
-#include <math.h>
-#include <stddef.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include <log.h>
-#include <preprocess.h>
-
-static void preprocess_erase(struct ctx *ctx, size_t start)
-{
- assert(ctx->data[start] == '#');
-
- for (size_t i = start; i < ctx->size; i++) {
- char cur = ctx->data[i];
- if (cur == '\0')
- break;
-
- if (cur == '\n') {
- ctx->data[i] = MACRO_NEWLINE;
- break;
- } else {
- ctx->data[i] = MACRO_SKIP;
- }
- }
-}
-
-void preprocess(struct ctx *ctx)
-{
- ctx->size = ctx->location.size;
- ctx->data = malloc(ctx->size);
- memcpy(ctx->data, ctx->location.data, ctx->size);
-
- for (size_t i = 0; i < ctx->location.size; i++) {
- const char cur = ctx->location.data[i];
-
- ctx->location.column++;
-
- if (cur == '\n') {
- ctx->location.line++;
- ctx->location.column = 0;
- continue;
- } else if (cur == '\0') {
- break;
- } else if (cur == '#' && ctx->location.column == 1) {
- if (strncmp(ctx->location.data + i + 1, "inc ",
- fmin(4, ctx->location.size - i)) == 0) {
- // TODO: Add include features
- } else if (*(ctx->location.data + i + 1) == '#') {
- // Comment
- } else {
- errln(&ctx->location, "Invalid preprocessing directive");
- }
- preprocess_erase(ctx, i);
- }
- }
-
- context_rewind(ctx);
-}
diff --git a/src/tokenize.c b/src/tokenize.c
deleted file mode 100644
index 0fa58fe..0000000
--- a/src/tokenize.c
+++ /dev/null
@@ -1,234 +0,0 @@
-#include <assert.h>
-#include <ctype.h>
-#include <stdarg.h>
-#include <stdbool.h>
-#include <stddef.h>
-#include <stdio.h>
-
-#include <log.h>
-#include <preprocess.h>
-#include <tokenize.h>
-
-// TODO: Do some different limitations for identifiers/types
-
-static size_t peek_identifier(struct ctx *ctx, size_t start, size_t opt_count, ...)
-{
- if (isdigit(context_getch(ctx, start)))
- errln(&ctx->location, "Identifiers can't start with numbers");
-
- for (size_t i = start; i < ctx->size; i++) {
- char cur = context_getch(ctx, i);
-
- // Check for every option in variadic argument
- va_list ap;
- va_start(ap, opt_count);
- for (size_t j = 0; j < opt_count; j++) {
- char ch = va_arg(ap, int);
- if (cur == ch) {
- va_end(ap);
- return i;
- }
- }
- va_end(ap);
-
- if (cur == '\n')
- errln(&ctx->location, "Unexpected end of line while scanning");
-
- if (!isalnum(cur) && (cur < '!' || cur > '~'))
- errln(&ctx->location, "'%c' is not an identifier", cur);
- }
-
- errln(&ctx->location, "Unexpected end of buffer while scanning");
-}
-
-static size_t peek_type(struct ctx *ctx, size_t start, size_t opt_count, ...)
-{
- if (isdigit(context_getch(ctx, start)))
- errln(&ctx->location, "Types can't start with numbers");
-
- for (size_t i = start; i < ctx->size; i++) {
- char cur = context_getch(ctx, i);
-
- // Check for every option in variadic argument
- va_list ap;
- va_start(ap, opt_count);
- for (size_t j = 0; j < opt_count; j++) {
- char ch = va_arg(ap, int);
- if (cur == ch) {
- va_end(ap);
- return i;
- }
- }
- va_end(ap);
-
- if (cur == '\n')
- errln(&ctx->location, "Unexpected end of line while scanning");
-
- if (!isalnum(cur) && (cur < '!' || cur > '~'))
- errln(&ctx->location, "'%c' is not an identifier", cur);
- }
-
- errln(&ctx->location, "Unexpected end of buffer while scanning");
-}
-
-static void token_add(struct ctx *ctx, enum token_type type, size_t start, size_t end)
-{
- assert(type != UNKNOWN);
-
- struct token token = { 0 };
- token.type = type;
- token.string.start = start;
- token.string.end = end;
- token.location = ctx->location;
-
- ctx->tokens[ctx->token_count] = token;
-
- ctx->token_count++;
- assert(ctx->token_count < TOKENS_MAX);
-
- if (type == NEWLINE) {
- ctx->location.line++;
- ctx->location.column = 0;
- } else {
- ctx->location.column += end - start;
- }
-}
-
-void token_print(struct ctx *ctx, struct token *token)
-{
- assert(token->type != UNKNOWN);
-
- printf("[token type=%d] ", token->type);
- if (token->type == NEWLINE || token->type == END) {
- printf("(Unprintable)\n");
- return;
- }
-
- printf("'%.*s'\n", (int)(token->string.end - token->string.start),
- ctx->data + token->string.start);
-}
-
-void tokenize(struct ctx *ctx)
-{
- enum {
- PARSE_DECLARATION,
- PARSE_DEFINITION,
- PARSE_NUMBER,
- PARSE_BODY,
- PARSE_STRING,
- } state = PARSE_DECLARATION,
- prev = PARSE_DECLARATION;
-
- // TODO: Clean this loop up (move into seperate tokenizing functions)
-
- size_t start;
- for (size_t i = 0; i < ctx->size; i++) {
- const char cur = context_getch(ctx, i);
-
- // String parsing
- if (cur == '"') {
- if (state == PARSE_STRING) {
- token_add(ctx, STRING, start, i + 1);
- state = prev;
- } else {
- state = PARSE_STRING;
- start = i;
- }
- continue;
- } else if (state == PARSE_STRING) {
- continue;
- }
-
- if (state != PARSE_BODY) {
- switch (cur) {
- case '\0':
- errln(&ctx->location, "Unexpected end of buffer");
- case '\n':
- token_add(ctx, NEWLINE, i, i + 1);
- continue;
- case MACRO_SKIP:
- ctx->location.column++;
- continue;
- case MACRO_NEWLINE:
- ctx->location.line++;
- continue;
- default:
- break;
- }
- }
-
- if (state == PARSE_BODY) {
- switch (cur) {
- case '(':
- token_add(ctx, LPAREN, i, i + 1);
- continue;
- case ')':
- token_add(ctx, RPAREN, i, i + 1);
- continue;
- case '\n':
- token_add(ctx, NEWLINE, i, i + 1);
- state = PARSE_DECLARATION;
- continue;
- default:
- break;
- }
-
- size_t end_ident = peek_identifier(ctx, i, 3, ' ', ')', '\n');
- token_add(ctx, IDENT, i, end_ident);
- i = end_ident - (context_getch(ctx, end_ident) != ' ');
- continue;
- }
-
- if (state == PARSE_DECLARATION) {
- size_t end_ident = peek_identifier(ctx, i, 1, ' ');
- token_add(ctx, IDENT, i, end_ident);
-
- size_t start_type = end_ident + 1;
- while (context_getch(ctx, start_type) != '-' ||
- context_getch(ctx, start_type + 1) != '>') {
- size_t end_type = peek_type(ctx, start_type, 1, ' ');
- token_add(ctx, TYPE, start_type, end_type);
- start_type = end_type + 1;
- }
-
- if (context_getch(ctx, start_type + 2) != ' ')
- errln(&ctx->location, "Missing space");
- token_add(ctx, TYPEDELIM, start_type, start_type + 2);
-
- start_type += 3;
- size_t final_type = peek_type(ctx, start_type, 1, '\n');
- token_add(ctx, TYPE, start_type, final_type);
-
- i = final_type - 1;
- state = PARSE_DEFINITION;
- continue;
- }
-
- if (state == PARSE_DEFINITION) {
- size_t end_ident = peek_identifier(ctx, i, 1, ' ');
- token_add(ctx, IDENT, i, end_ident);
-
- size_t start_parameter = end_ident + 1;
- while (context_getch(ctx, start_parameter) != ':') {
- size_t end_parameter =
- peek_identifier(ctx, start_parameter, 1, ' ');
- token_add(ctx, PARAM, start_parameter, end_parameter);
- start_parameter = end_parameter + 1;
- }
-
- if (context_getch(ctx, start_parameter + 1) != ' ')
- errln(&ctx->location, "Missing space");
- token_add(ctx, IDENTDELIM, start_parameter, start_parameter + 1);
-
- i = start_parameter + 1;
- state = PARSE_BODY;
- continue;
- }
- }
-
- /* for (size_t i = 0; i < ctx->token_count; i++) */
- /* token_print(ctx, &ctx->tokens[i]); */
-
- token_add(ctx, END, ctx->size, ctx->size);
- context_rewind(ctx);
-}
diff --git a/src/treeify.c b/src/treeify.c
deleted file mode 100644
index 47f8216..0000000
--- a/src/treeify.c
+++ /dev/null
@@ -1,187 +0,0 @@
-#include <assert.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-#include <log.h>
-#include <tokenize.h>
-#include <treeify.h>
-
-#define INITIAL_PARAMETER_COUNT 3
-
-static void __expect(struct ctx *ctx, struct token *token, enum token_type type, const char *file,
- int line, const char *func, const char *type_enum)
-{
- if (token->type != type) {
- printf("[DBG] %s:%d: %s\n", file, line, func);
- token_print(ctx, token);
- errln(&token->location, "Expected token of type %s", type_enum);
- }
-}
-
-#define expect(token, type) __expect(ctx, token, type, __FILE__, __LINE__, __func__, #type)
-
-static struct token *next(struct token *token, size_t i)
-{
- return token + i;
-}
-
-static struct token *parse_declaration(struct ctx *ctx, struct token *token)
-{
- expect(token, IDENT);
-
- struct node_declaration *node = malloc(sizeof(*node));
- node->callee.name = token->string;
-
- node->parameters = malloc(INITIAL_PARAMETER_COUNT * sizeof(*node->parameters));
- size_t param_idx = 0;
-
- token = next(token, 1);
- while (token->type != TYPEDELIM) {
- if (token->type == NEWLINE || token->type == END)
- expect(token, TYPEDELIM);
-
- if (token->type != TYPE)
- expect(token, TYPE);
-
- // Expand parameter space if necessary
- if ((param_idx + 1) % INITIAL_PARAMETER_COUNT == 0)
- // TODO: Fix realloc failure check (and other mallocs too btw)
- node->parameters = realloc(node->parameters,
- ((param_idx / INITIAL_PARAMETER_COUNT) + 1) *
- INITIAL_PARAMETER_COUNT *
- sizeof(*node->parameters));
-
- node->parameters[param_idx].type = token->string;
- param_idx++;
-
- token = next(token, 1);
- }
-
- node->parameter_count = param_idx;
-
- token = next(token, 1);
- expect(token, TYPE);
- node->callee.type = token->string;
-
- tree_add(ctx, DECLARATION, node); // TODO: Push to declaration/signature array instead
-
- expect(next(token, 1), NEWLINE);
- return next(token, 2);
-}
-
-static struct token *parse_definition(struct ctx *ctx, struct token *token)
-{
- expect(token, IDENT);
-
- struct node_definition *node = malloc(sizeof(*node));
- node->callee.name = token->string;
-
- node->parameters = malloc(INITIAL_PARAMETER_COUNT * sizeof(*node->parameters));
- size_t param_idx = 0;
-
- token = next(token, 1);
- while (token->type != IDENTDELIM) {
- if (token->type == NEWLINE || token->type == END)
- expect(token, IDENTDELIM);
-
- if (token->type != PARAM)
- expect(token, PARAM);
-
- // Expand parameter space if necessary
- if ((param_idx + 1) % INITIAL_PARAMETER_COUNT == 0)
- // TODO: Fix realloc failure check (and other mallocs too btw)
- node->parameters = realloc(node->parameters,
- ((param_idx / INITIAL_PARAMETER_COUNT) + 1) *
- INITIAL_PARAMETER_COUNT *
- sizeof(*node->parameters));
-
- node->parameters[param_idx].name = token->string;
- param_idx++;
-
- token = next(token, 1);
- }
-
- node->parameter_count = param_idx;
-
- tree_add(ctx, DEFINITION, node);
-
- // TODO: Parse expression
- while (token->type != NEWLINE)
- token = next(token, 1);
-
- return next(token, 1);
-}
-
-static struct token *parse_block(struct ctx *ctx, struct token *token)
-{
- if (token->type != IDENT)
- return next(token, 1); //&ctx->tokens[ctx->token_count - 1];
-
- token = parse_declaration(ctx, token);
- token = parse_definition(ctx, token);
- expect(token, NEWLINE);
- return next(token, 1);
-}
-
-static struct token *parse(struct ctx *ctx, struct token *token)
-{
- while (token->type != END)
- token = parse_block(ctx, token);
-
- return token;
-}
-
-struct node *tree_create(void)
-{
- struct node *tree = malloc(sizeof(*tree));
- tree->prev = NULL;
- tree->next = NULL;
- return tree;
-}
-
-void tree_add(struct ctx *ctx, enum node_type type, void *data)
-{
- assert(ctx->tree.head);
- struct node *node = calloc(sizeof(*node), 1);
- node->type = type;
- node->data = data;
- if (!ctx->tree.current) {
- ctx->tree.head->next = node;
- node->prev = ctx->tree.head;
- } else {
- ctx->tree.current->next = node;
- node->prev = ctx->tree.current;
- }
- ctx->tree.current = node;
-}
-
-void tree_destroy(struct node *node)
-{
- while (node) {
- struct node *next = node->next;
-
- if (node->type == DEFINITION) {
- struct node_definition *definition = node->data;
- free(definition->parameters);
- free(definition);
- } else if (node->type == DECLARATION) {
- struct node_declaration *declaration = node->data;
- free(declaration->parameters);
- free(declaration);
- }
-
- free(node);
- node = next;
- }
-}
-
-void treeify(struct ctx *ctx)
-{
- struct token *token = ctx->tokens;
-
- while (token->type == NEWLINE)
- token = next(token, 1);
-
- token = parse(ctx, token);
- expect(token, END);
-}