// NOTE(review): the lines below are artifacts of the web page this file was
// scraped from (archive notice and file metadata), preserved as comments so
// the file remains valid Go:
// This repository has been archived on 2022-08-30. You can view files and clone it, but cannot push or open issues or pull requests.
// arf/lexer/lexer.go
// 372 lines
// 8.5 KiB
// Go
// Raw Normal View History
// 2022-08-07 19:18:59 +00:00
package lexer
// 2022-08-10 00:45:06 +00:00
import "io"
import "git.tebibyte.media/sashakoshka/arf/file"
import "git.tebibyte.media/sashakoshka/arf/types"
import "git.tebibyte.media/sashakoshka/arf/infoerr"
2022-08-07 19:18:59 +00:00
// LexingOperation holds information about an ongoing lexing operataion.
type LexingOperation struct {
file *file.File
char rune
tokens []Token
2022-08-07 19:18:59 +00:00
}
2022-08-10 00:45:06 +00:00
// Tokenize converts a file into a slice of tokens (lexemes).
func Tokenize (file *file.File) (tokens []Token, err error) {
lexer := LexingOperation { file: file }
err = lexer.tokenize()
tokens = lexer.tokens
2022-08-10 00:45:06 +00:00
// if the lexing operation returned io.EOF, nothing went wrong so we
// return nil for err.
if err == io.EOF {
err = nil
}
return
}
// tokenize converts a file into a slice of tokens (lexemes). It will always
// return a non-nil error, but if nothing went wrong it will return io.EOF.
func (lexer *LexingOperation) tokenize () (err error) {
// check to see if the beginning of the file says :arf
var shebangCheck = []rune(":arf\n")
for index := 0; index < 5; index ++ {
err = lexer.nextRune()
if err != nil || shebangCheck[index] != lexer.char {
err = infoerr.NewError (
lexer.file.Location(1),
"not an arf file",
infoerr.ErrorKindError)
return
}
}
2022-08-10 00:45:06 +00:00
err = lexer.nextRune()
if err != nil { return }
for {
lowercase := lexer.char >= 'a' && lexer.char <= 'z'
uppercase := lexer.char >= 'A' && lexer.char <= 'Z'
number := lexer.char >= '0' && lexer.char <= '9'
if number {
2022-08-11 05:14:41 +00:00
err = lexer.tokenizeNumberBeginning(false)
if err != nil { return }
2022-08-10 00:45:06 +00:00
} else if lowercase || uppercase {
err = lexer.tokenizeAlphaBeginning()
if err != nil { return }
2022-08-11 06:00:57 +00:00
} else {
2022-08-10 02:18:12 +00:00
err = lexer.tokenizeSymbolBeginning()
if err != nil { return }
2022-08-10 00:45:06 +00:00
}
err = lexer.skipSpaces()
if err != nil { return }
2022-08-10 00:45:06 +00:00
}
2022-08-11 08:47:42 +00:00
if lexer.tokens[len(lexer.tokens) - 1].kind != TokenKindNewline {
token := lexer.newToken()
token.kind = TokenKindNewline
lexer.addToken(token)
2022-08-11 08:47:42 +00:00
}
2022-08-10 00:45:06 +00:00
return
2022-08-07 19:18:59 +00:00
}
// tokenizeAlphaBeginning tokenizes a token that starts with a letter: either
// a name, or a two-letter permission made of the runes n, r, and w.
func (lexer *LexingOperation) tokenizeAlphaBeginning () (err error) {
	token := lexer.newToken()
	token.kind = TokenKindName

	got := ""
	for {
		lowercase := lexer.char >= 'a' && lexer.char <= 'z'
		uppercase := lexer.char >= 'A' && lexer.char <= 'Z'
		number    := lexer.char >= '0' && lexer.char <= '9'
		if !lowercase && !uppercase && !number { break }

		got += string(lexer.char)
		// propagate read errors (previously discarded, which swallowed
		// io.EOF when a file ended in the middle of a name). the token
		// collected so far is still emitted below.
		err = lexer.nextRune()
		if err != nil { break }
	}

	token.value = got

	// a two-rune name made entirely of n, r, and w is a permission.
	if len(got) == 2 {
		firstValid  := got[0] == 'n' || got[0] == 'r' || got[0] == 'w'
		secondValid := got[1] == 'n' || got[1] == 'r' || got[1] == 'w'
		if firstValid && secondValid {
			token.kind  = TokenKindPermission
			token.value = types.PermissionFrom(got)
		}
	}

	lexer.addToken(token)
	return
}
2022-08-10 02:18:12 +00:00
func (lexer *LexingOperation) tokenizeSymbolBeginning () (err error) {
switch lexer.char {
2022-08-10 05:03:59 +00:00
case '#':
// comment
for lexer.char != '\n' {
err = lexer.nextRune()
if err != nil { return }
2022-08-10 05:03:59 +00:00
}
2022-08-10 02:18:12 +00:00
case '\t':
2022-08-10 05:03:59 +00:00
// indent level
previousToken := lexer.tokens[len(lexer.tokens) - 1]
if !previousToken.Is(TokenKindNewline) {
err = lexer.nextRune()
infoerr.NewError (
lexer.file.Location(1),
"tab not used as indent",
infoerr.ErrorKindWarn).Print()
return
}
token := lexer.newToken()
token.kind = TokenKindIndent
// eat up tabs while increasing the indent level
indentLevel := 0
2022-08-10 02:18:12 +00:00
for lexer.char == '\t' {
indentLevel ++
err = lexer.nextRune()
if err != nil { return }
2022-08-10 02:18:12 +00:00
}
token.value = indentLevel
lexer.addToken(token)
2022-08-10 05:03:59 +00:00
case '\n':
// line break
// if the last line is empty, discard it
2022-08-11 08:47:42 +00:00
lastLineEmpty := true
tokenIndex := len(lexer.tokens) - 1
for lexer.tokens[tokenIndex].kind != TokenKindNewline {
if lexer.tokens[tokenIndex].kind != TokenKindIndent {
lastLineEmpty = false
break
}
tokenIndex --
}
if lastLineEmpty {
lexer.tokens = lexer.tokens[:tokenIndex]
}
token := lexer.newToken()
token.kind = TokenKindNewline
lexer.addToken(token)
err = lexer.nextRune()
2022-08-10 02:18:12 +00:00
case '"':
err = lexer.tokenizeString(false)
2022-08-10 02:18:12 +00:00
case '\'':
err = lexer.tokenizeString(true)
2022-08-10 02:18:12 +00:00
case ':':
token := lexer.newToken()
token.kind = TokenKindColon
lexer.addToken(token)
err = lexer.nextRune()
2022-08-10 02:18:12 +00:00
case '.':
token := lexer.newToken()
2022-08-17 00:24:27 +00:00
err = lexer.nextRune()
if err != nil { return }
token.kind = TokenKindDot
2022-08-17 00:24:27 +00:00
if lexer.char == '.' {
token.kind = TokenKindElipsis
err = lexer.nextRune()
}
lexer.addToken(token)
2022-08-15 18:50:09 +00:00
case ',':
token := lexer.newToken()
token.kind = TokenKindComma
lexer.addToken(token)
err = lexer.nextRune()
2022-08-10 02:18:12 +00:00
case '[':
token := lexer.newToken()
token.kind = TokenKindLBracket
lexer.addToken(token)
err = lexer.nextRune()
2022-08-10 02:18:12 +00:00
case ']':
token := lexer.newToken()
token.kind = TokenKindRBracket
lexer.addToken(token)
err = lexer.nextRune()
2022-08-10 02:18:12 +00:00
case '{':
token := lexer.newToken()
token.kind = TokenKindLBrace
lexer.addToken(token)
err = lexer.nextRune()
2022-08-10 02:18:12 +00:00
case '}':
token := lexer.newToken()
token.kind = TokenKindRBrace
lexer.addToken(token)
err = lexer.nextRune()
2022-08-10 04:48:18 +00:00
case '+':
token := lexer.newToken()
err = lexer.nextRune()
if err != nil { return }
token.kind = TokenKindPlus
if lexer.char == '+' {
token.kind = TokenKindIncrement
err = lexer.nextRune()
}
lexer.addToken(token)
2022-08-10 04:48:18 +00:00
case '-':
err = lexer.tokenizeDashBeginning()
2022-08-10 04:48:18 +00:00
case '*':
token := lexer.newToken()
token.kind = TokenKindAsterisk
lexer.addToken(token)
err = lexer.nextRune()
2022-08-10 04:48:18 +00:00
case '/':
token := lexer.newToken()
token.kind = TokenKindSlash
lexer.addToken(token)
err = lexer.nextRune()
2022-08-10 04:48:18 +00:00
case '@':
token := lexer.newToken()
token.kind = TokenKindAt
lexer.addToken(token)
err = lexer.nextRune()
2022-08-10 04:48:18 +00:00
case '!':
token := lexer.newToken()
token.kind = TokenKindExclamation
lexer.addToken(token)
err = lexer.nextRune()
2022-08-10 04:48:18 +00:00
case '%':
token := lexer.newToken()
token.kind = TokenKindPercent
lexer.addToken(token)
err = lexer.nextRune()
2022-08-10 04:48:18 +00:00
case '~':
token := lexer.newToken()
token.kind = TokenKindTilde
lexer.addToken(token)
err = lexer.nextRune()
2022-08-10 04:48:18 +00:00
case '<':
token := lexer.newToken()
err = lexer.nextRune()
if err != nil { return }
token.kind = TokenKindLessThan
if lexer.char == '<' {
token.kind = TokenKindLShift
err = lexer.nextRune()
}
lexer.addToken(token)
2022-08-10 04:48:18 +00:00
case '>':
token := lexer.newToken()
err = lexer.nextRune()
if err != nil { return }
token.kind = TokenKindGreaterThan
if lexer.char == '>' {
token.kind = TokenKindRShift
err = lexer.nextRune()
}
lexer.addToken(token)
2022-08-10 04:48:18 +00:00
case '|':
token := lexer.newToken()
err = lexer.nextRune()
if err != nil { return }
token.kind = TokenKindBinaryOr
if lexer.char == '|' {
token.kind = TokenKindLogicalOr
err = lexer.nextRune()
}
lexer.addToken(token)
2022-08-10 04:48:18 +00:00
case '&':
token := lexer.newToken()
err = lexer.nextRune()
if err != nil { return }
token.kind = TokenKindBinaryAnd
if lexer.char == '&' {
token.kind = TokenKindLogicalAnd
err = lexer.nextRune()
}
lexer.addToken(token)
2022-08-10 02:18:12 +00:00
default:
err = infoerr.NewError (
lexer.file.Location(1),
"unexpected symbol character " +
2022-08-10 02:18:12 +00:00
string(lexer.char),
infoerr.ErrorKindError)
2022-08-10 02:18:12 +00:00
return
}
return
}
func (lexer *LexingOperation) tokenizeDashBeginning () (err error) {
2022-08-11 06:10:34 +00:00
err = lexer.nextRune()
if err != nil { return }
if lexer.char == '-' {
token := lexer.newToken()
token.kind = TokenKindDecrement
2022-08-11 06:10:34 +00:00
err = lexer.nextRune()
if err != nil { return }
if lexer.char == '-' {
token.kind = TokenKindSeparator
lexer.nextRune()
}
lexer.addToken(token)
} else if lexer.char == '>' {
token := lexer.newToken()
token.kind = TokenKindReturnDirection
2022-08-11 06:10:34 +00:00
err = lexer.nextRune()
if err != nil { return }
2022-08-11 06:10:34 +00:00
lexer.addToken(token)
} else if lexer.char >= '0' && lexer.char <= '9' {
lexer.tokenizeNumberBeginning(true)
} else {
token := lexer.newToken()
token.kind = TokenKindMinus
2022-08-11 06:10:34 +00:00
lexer.addToken(token)
}
return
}
// newToken creates a new token from the lexer's current position in the file.
func (lexer *LexingOperation) newToken () (token Token) {
	token = Token { location: lexer.file.Location(1) }
	return
}
// addToken adds a new token to the end of the lexer's token slice.
func (lexer *LexingOperation) addToken (token Token) {
	lexer.tokens = append(lexer.tokens, token)
}
// skipSpaces skips all space characters (not tabs or newlines).
func (lexer *LexingOperation) skipSpaces () (err error) {
	for lexer.char == ' ' {
		if err = lexer.nextRune(); err != nil {
			return err
		}
	}
	return nil
}
2022-08-10 00:45:06 +00:00
// nextRune advances the lexer to the next rune in the file.
func (lexer *LexingOperation) nextRune () (err error) {
lexer.char, _, err = lexer.file.ReadRune()
if err != nil && err != io.EOF {
return infoerr.NewError (
lexer.file.Location(1),
err.Error(), infoerr.ErrorKindError)
2022-08-10 00:45:06 +00:00
}
2022-08-07 19:18:59 +00:00
return
}