Normalized whitespace tokens

Sasha Koshka 2022-08-11 03:47:42 -05:00
parent 0a31ea7bf8
commit af6f170833
2 changed files with 21 additions and 0 deletions


@@ -54,6 +54,10 @@ func (lexer *LexingOperation) tokenize () (err error) {
        if err != nil { return }
    }

    if lexer.tokens[len(lexer.tokens) - 1].kind != TokenKindNewline {
        lexer.addToken(Token { kind: TokenKindNewline })
    }
    return
}
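
The guard added above makes sure the finished token stream always ends with a newline token. Below is a minimal, self-contained sketch of that step, using simplified Token and TokenKind stand-ins rather than this package's real definitions; the helper name and the extra emptiness check are illustrative additions, not part of the diff.

package main

import "fmt"

// Simplified stand-ins for the lexer's token types; illustrative only.
type TokenKind int

const (
    TokenKindNewline TokenKind = iota
    TokenKindName
)

type Token struct {
    kind TokenKind
}

// ensureTrailingNewline is a hypothetical helper mirroring the guard above:
// if the last token is not a newline, append one. The emptiness check is an
// added safeguard, not part of the diff.
func ensureTrailingNewline (tokens []Token) []Token {
    if len(tokens) > 0 && tokens[len(tokens) - 1].kind != TokenKindNewline {
        tokens = append(tokens, Token { kind: TokenKindNewline })
    }
    return tokens
}

func main () {
    tokens := []Token { Token { kind: TokenKindName } }
    tokens = ensureTrailingNewline(tokens)
    fmt.Println(len(tokens)) // 2: the name token plus the appended newline
}
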
@@ -119,6 +123,21 @@ func (lexer *LexingOperation) tokenizeSymbolBeginning () (err error) {
        }
    case '\n':
        // line break
        lastLineEmpty := true
        tokenIndex := len(lexer.tokens) - 1
        for lexer.tokens[tokenIndex].kind != TokenKindNewline {
            if lexer.tokens[tokenIndex].kind != TokenKindIndent {
                lastLineEmpty = false
                break
            }
            tokenIndex --
        }

        if lastLineEmpty {
            lexer.tokens = lexer.tokens[:tokenIndex]
        }

        // TODO: if last line was blank (only whitespace), discard.
        lexer.addToken (Token {
            kind: TokenKindNewline,
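
The backward scan added in this hunk decides whether the line that just ended held nothing but indent tokens; if so, everything from the previous newline onward is dropped before a fresh newline token is appended. A rough standalone sketch of that trimming step, again with simplified token stand-ins; the helper name and the bounds checks on tokenIndex are illustrative additions, not part of the diff.

package main

import "fmt"

// Simplified token stand-ins, as in the previous sketch; illustrative only.
type TokenKind int

const (
    TokenKindNewline TokenKind = iota
    TokenKindIndent
    TokenKindName
)

type Token struct {
    kind TokenKind
}

// trimBlankLine is a hypothetical helper that walks backward from the end of
// the slice. If only indent tokens appear before the previous newline, the
// line is blank and everything from that newline onward is dropped.
func trimBlankLine (tokens []Token) []Token {
    lastLineEmpty := true
    tokenIndex := len(tokens) - 1
    for tokenIndex >= 0 && tokens[tokenIndex].kind != TokenKindNewline {
        if tokens[tokenIndex].kind != TokenKindIndent {
            lastLineEmpty = false
            break
        }
        tokenIndex --
    }
    if lastLineEmpty && tokenIndex >= 0 {
        tokens = tokens[:tokenIndex]
    }
    return tokens
}

func main () {
    // A name token, a newline, then a line holding only indentation.
    tokens := []Token {
        Token { kind: TokenKindName },
        Token { kind: TokenKindNewline },
        Token { kind: TokenKindIndent },
        Token { kind: TokenKindIndent },
    }
    tokens = trimBlankLine(tokens)
    // Prints 1: only the name token survives; the lexer then appends a
    // fresh newline token.
    fmt.Println(len(tokens))
}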


@@ -45,6 +45,7 @@ func TestTokenizeAll (test *testing.T) {
        Token { kind: TokenKindRBracket },
        Token { kind: TokenKindLBrace },
        Token { kind: TokenKindRBrace },
        Token { kind: TokenKindNewline },
        Token { kind: TokenKindPlus },
        Token { kind: TokenKindMinus },
        Token { kind: TokenKindIncrement },
@@ -63,6 +64,7 @@ func TestTokenizeAll (test *testing.T) {
        Token { kind: TokenKindLogicalOr },
        Token { kind: TokenKindBinaryAnd },
        Token { kind: TokenKindLogicalAnd },
        Token { kind: TokenKindNewline },
    }

    if len(tokens) != len(correct) {
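
The updated expectations account for the newline tokens the lexer now emits. The comparison that begins at the end of this hunk checks the slice lengths first; a small sketch of a length-then-kind comparison in that spirit follows, with simplified tokens and a hypothetical helper (the real test's assertions may differ).

package main

import "fmt"

// Simplified token stand-ins; illustrative only.
type TokenKind int

const (
    TokenKindNewline TokenKind = iota
    TokenKindLBrace
    TokenKindRBrace
)

type Token struct {
    kind TokenKind
}

// kindsMatch is a hypothetical helper comparing two token slices by length
// and then by kind, in the same spirit as the check that closes the hunk.
func kindsMatch (tokens, correct []Token) bool {
    if len(tokens) != len(correct) {
        return false
    }
    for index, token := range tokens {
        if token.kind != correct[index].kind {
            return false
        }
    }
    return true
}

func main () {
    // With the change above, the expected stream ends in a newline token.
    correct := []Token {
        Token { kind: TokenKindLBrace },
        Token { kind: TokenKindRBrace },
        Token { kind: TokenKindNewline },
    }
    fmt.Println(kindsMatch(correct, correct)) // true
}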