From 488220f3a7071b5287d15337cf127ff3c641b198 Mon Sep 17 00:00:00 2001 From: Sasha Koshka Date: Wed, 10 Aug 2022 11:43:21 -0400 Subject: [PATCH] Added all tokens to unit test --- lexer/lexer_test.go | 40 ++++++++++++++++++++++++++++++---- lexer/token.go | 4 +++- tests/lexer/all | 2 ++ tests/{parser => lexer}/indent | 0 tests/parser/all | 6 ----- 5 files changed, 41 insertions(+), 11 deletions(-) create mode 100644 tests/lexer/all rename tests/{parser => lexer}/indent (100%) delete mode 100644 tests/parser/all diff --git a/lexer/lexer_test.go b/lexer/lexer_test.go index 1f8a7c1..26ffbc9 100644 --- a/lexer/lexer_test.go +++ b/lexer/lexer_test.go @@ -4,20 +4,52 @@ import "testing" import "github.com/sashakoshka/arf/file" func TestTokenizeAll (test *testing.T) { - file, err := file.Open("tests/parser/all") + file, err := file.Open("tests/lexer/all") if err != nil { test.Log(err) test.Fail() } tokens, err := Tokenize(file) - if err != nil { - test.Log(err) + if err == nil { + test.Log("Tokenize() should have returned an error") test.Fail() } correct := []Token { - Token { kind: TokenKindSeparator, }, + Token { kind: TokenKindSeparator }, + Token { kind: TokenKindPermission /* TODO: value */ }, + Token { kind: TokenKindReturnDirection }, + Token { kind: TokenKindInt, value: -349820394 }, + Token { kind: TokenKindUInt, value: 932748397 }, + Token { kind: TokenKindFloat, value: 239485.37520 }, + Token { kind: TokenKindString, value: "hello world\n" }, + Token { kind: TokenKindRune, value: 'E' }, + Token { kind: TokenKindName, value: "helloWorld" }, + Token { kind: TokenKindColon }, + Token { kind: TokenKindDot }, + Token { kind: TokenKindLBracket }, + Token { kind: TokenKindRBracket }, + Token { kind: TokenKindLBrace }, + Token { kind: TokenKindRBrace }, + Token { kind: TokenKindPlus }, + Token { kind: TokenKindMinus }, + Token { kind: TokenKindIncrement }, + Token { kind: TokenKindDecrement }, + Token { kind: TokenKindAsterisk }, + Token { kind: TokenKindSlash }, + Token { kind: 
TokenKindAt }, + Token { kind: TokenKindExclamation }, + Token { kind: TokenKindPercent }, + Token { kind: TokenKindTilde }, + Token { kind: TokenKindLessThan }, + Token { kind: TokenKindLShift }, + Token { kind: TokenKindGreaterThan }, + Token { kind: TokenKindRShift }, + Token { kind: TokenKindBinaryOr }, + Token { kind: TokenKindLogicalOr }, + Token { kind: TokenKindBinaryAnd }, + Token { kind: TokenKindLogicalAnd }, } if len(tokens) != len(correct) { diff --git a/lexer/token.go b/lexer/token.go index 7569257..e803606 100644 --- a/lexer/token.go +++ b/lexer/token.go @@ -14,12 +14,12 @@ const ( TokenKindReturnDirection TokenKindInt + TokenKindUInt TokenKindFloat TokenKindString TokenKindRune TokenKindName - TokenKindSymbol TokenKindColon TokenKindDot @@ -31,6 +31,8 @@ const ( TokenKindPlus TokenKindMinus + TokenKindIncrement + TokenKindDecrement TokenKindAsterisk TokenKindSlash diff --git a/tests/lexer/all b/tests/lexer/all new file mode 100644 index 0000000..f2c7b83 --- /dev/null +++ b/tests/lexer/all @@ -0,0 +1,2 @@ +--- rw -> -349820394 932748397 239485.37520 "hello world!\n" 'E' helloWorld:.[]{} ++ - ++ -- * / @ ! % ~ < << > >> | || & && diff --git a/tests/parser/indent b/tests/lexer/indent similarity index 100% rename from tests/parser/indent rename to tests/lexer/indent diff --git a/tests/parser/all b/tests/parser/all deleted file mode 100644 index 8e9b527..0000000 --- a/tests/parser/all +++ /dev/null @@ -1,6 +0,0 @@ -# normal tokens ---- rn rr nw -> 239 -120 + - / * -- ++ [{[skadjsk 2039.32]}] 0x5FC2D -"skdjlsakdj" '\n' - -# erroneous tokens (poorly spaced) ----rnrrnw->239-120+-/*--++