/*
Package lexer implements a tokenizer for the ARF language. It contains a
function called Tokenize which takes in a file from the ARF file package and
outputs a slice of tokens.
*/
package lexer

import "io"
import "git.tebibyte.media/arf/arf/file"
import "git.tebibyte.media/arf/arf/types"
import "git.tebibyte.media/arf/arf/infoerr"

// lexingOperation holds information about an ongoing lexing operation.
type lexingOperation struct {
	file   *file.File
	char   rune
	tokens []Token
}

// Tokenize converts a file into a slice of tokens (lexemes).
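//
// A minimal usage sketch (file.Open is assumed here for illustration and may
// not match the actual file package API):
//
//	sourceFile, err := file.Open("main.arf")
//	if err != nil { return err }
//	tokens, err := lexer.Tokenize(sourceFile)
//	if err != nil { return err }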
func Tokenize (file *file.File) (tokens []Token, err error) {
	lexer := lexingOperation { file: file }
	err = lexer.tokenize()
	tokens = lexer.tokens

	// if the lexing operation returned io.EOF, nothing went wrong so we
	// return nil for err.
	if err == io.EOF {
		err = nil
	}
	return
}

// tokenize converts a file into a slice of tokens (lexemes). It will always
// return a non-nil error, but if nothing went wrong it will return io.EOF.
func (lexer *lexingOperation) tokenize () (err error) {
	// check to see if the beginning of the file says :arf
	var shebangCheck = []rune(":arf\n")
	for index := 0; index < 5; index ++ {
		err = lexer.nextRune()

		if err != nil || shebangCheck[index] != lexer.char {
			err = infoerr.NewError (
				lexer.file.Location(1),
				"not an arf file",
				infoerr.ErrorKindError)
			return
		}
	}

	err = lexer.nextRune()
	if err != nil { return }

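	// main tokenization loop: each iteration lexes one token based on its
	// first character, then skips any trailing spaces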
	for {
		lowercase := lexer.char >= 'a' && lexer.char <= 'z'
		uppercase := lexer.char >= 'A' && lexer.char <= 'Z'
		number := lexer.char >= '0' && lexer.char <= '9'

		if number {
			err = lexer.tokenizeNumberBeginning(false)
			if err != nil { return }
		} else if lowercase || uppercase {
			err = lexer.tokenizeAlphaBeginning()
			if err != nil { return }
		} else {
			err = lexer.tokenizeSymbolBeginning()
			if err != nil { return }
		}

		err = lexer.skipSpaces()
		if err != nil { return }
	}

	// TODO: figure out why this is here and what its proper place is
	// because it is apparently unreachable
	if lexer.tokens[len(lexer.tokens) - 1].kind != TokenKindNewline {
		token := lexer.newToken()
		token.kind = TokenKindNewline
		lexer.addToken(token)
	}

	return
}
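
// tokenizeAlphaBeginning lexes a token that begins with a letter. It reads a
// run of letters and digits into a name token, and if the result is exactly
// two characters long and matches a permission (according to
// types.PermissionFrom), it becomes a permission token instead.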
func (lexer *lexingOperation) tokenizeAlphaBeginning () (err error) {
	token := lexer.newToken()
	token.kind = TokenKindName

	got := ""

	for {
		lowercase := lexer.char >= 'a' && lexer.char <= 'z'
		uppercase := lexer.char >= 'A' && lexer.char <= 'Z'
		number := lexer.char >= '0' && lexer.char <= '9'
		if !lowercase && !uppercase && !number { break }

		got += string(lexer.char)

		lexer.nextRune()
	}

	token.value = got
	token.location.SetWidth(len(got))
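
	// a two-character name might actually be a permission, for example
	// "rw" (an illustrative guess; types.PermissionFrom decides what
	// actually counts)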
	if len(got) == 2 {
		permission, isPermission := types.PermissionFrom(got)

		if isPermission {
			token.kind = TokenKindPermission
			token.value = permission
		}
	}

	lexer.addToken(token)

	return
}
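
// tokenizeSymbolBeginning lexes a token that begins with a symbol character,
// dispatching on the current rune to produce comment, indent, newline,
// string, punctuation, and operator tokens. An unrecognized character
// produces an error.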
func (lexer *lexingOperation) tokenizeSymbolBeginning () (err error) {
	switch lexer.char {
	case '#':
		// comment
		for lexer.char != '\n' {
			err = lexer.nextRune()
			if err != nil { return }
		}

	case '\t':
		// indent level
		previousToken := lexer.tokens[len(lexer.tokens) - 1]

		if !previousToken.Is(TokenKindNewline) {
			err = lexer.nextRune()

			infoerr.NewError (
				lexer.file.Location(1),
				"tab not used as indent",
				infoerr.ErrorKindWarn).Print()
			return
		}

		token := lexer.newToken()
		token.kind = TokenKindIndent

		// eat up tabs while increasing the indent level
		indentLevel := 0
		for lexer.char == '\t' {
			indentLevel ++
			err = lexer.nextRune()
			if err != nil { return }
		}
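
		// for example, a line that begins with three tabs yields a
		// single indent token whose value and width are both 3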
		token.value = indentLevel
		token.location.SetWidth(indentLevel)
		lexer.addToken(token)

	case '\n':
		// line break

		// if the last line is empty (all of its tokens are indent
		// tokens), discard it
		lastLineEmpty := true
		tokenIndex := len(lexer.tokens) - 1
		for lexer.tokens[tokenIndex].kind != TokenKindNewline {
			if lexer.tokens[tokenIndex].kind != TokenKindIndent {
				lastLineEmpty = false
				break
			}
			tokenIndex --
		}

		if lastLineEmpty {
			lexer.tokens = lexer.tokens[:tokenIndex]
		}

		token := lexer.newToken()
		token.kind = TokenKindNewline
		lexer.addToken(token)
		err = lexer.nextRune()

	case '\'':
		err = lexer.tokenizeString()

	case ':':
		token := lexer.newToken()
		token.kind = TokenKindColon
		lexer.addToken(token)
		err = lexer.nextRune()

	case '.':
		token := lexer.newToken()
		err = lexer.nextRune()
		if err != nil { return }
		token.kind = TokenKindDot
		if lexer.char == '.' {
			token.kind = TokenKindElipsis
			err = lexer.nextRune()
			token.location.SetWidth(2)
		}
		lexer.addToken(token)

	case ',':
		token := lexer.newToken()
		token.kind = TokenKindComma
		lexer.addToken(token)
		err = lexer.nextRune()

	case '(':
		token := lexer.newToken()
		token.kind = TokenKindLParen
		lexer.addToken(token)
		err = lexer.nextRune()

	case ')':
		token := lexer.newToken()
		token.kind = TokenKindRParen
		lexer.addToken(token)
		err = lexer.nextRune()

	case '[':
		token := lexer.newToken()
		token.kind = TokenKindLBracket
		lexer.addToken(token)
		err = lexer.nextRune()

	case ']':
		token := lexer.newToken()
		token.kind = TokenKindRBracket
		lexer.addToken(token)
		err = lexer.nextRune()

	case '{':
		token := lexer.newToken()
		token.kind = TokenKindLBrace
		lexer.addToken(token)
		err = lexer.nextRune()

	case '}':
		token := lexer.newToken()
		token.kind = TokenKindRBrace
		lexer.addToken(token)
		err = lexer.nextRune()

	case '+':
		token := lexer.newToken()
		err = lexer.nextRune()
		if err != nil { return }
		token.kind = TokenKindPlus
		if lexer.char == '+' {
			token.kind = TokenKindIncrement
			err = lexer.nextRune()
			token.location.SetWidth(2)
		}
		lexer.addToken(token)

	case '-':
		err = lexer.tokenizeDashBeginning()

	case '*':
		token := lexer.newToken()
		token.kind = TokenKindAsterisk
		lexer.addToken(token)
		err = lexer.nextRune()

	case '/':
		token := lexer.newToken()
		token.kind = TokenKindSlash
		lexer.addToken(token)
		err = lexer.nextRune()

	case '@':
		token := lexer.newToken()
		token.kind = TokenKindAt
		lexer.addToken(token)
		err = lexer.nextRune()

	case '!':
		token := lexer.newToken()
		err = lexer.nextRune()
		if err != nil { return }
		token.kind = TokenKindExclamation
		if lexer.char == '=' {
			token.kind = TokenKindNotEqualTo
			err = lexer.nextRune()
			token.location.SetWidth(2)
		}
		lexer.addToken(token)

	case '%':
		token := lexer.newToken()
		err = lexer.nextRune()
		if err != nil { return }
		token.kind = TokenKindPercent
		if lexer.char == '=' {
			token.kind = TokenKindPercentAssignment
			err = lexer.nextRune()
			token.location.SetWidth(2)
		}
		lexer.addToken(token)

	case '~':
		token := lexer.newToken()
		err = lexer.nextRune()
		if err != nil { return }
		token.kind = TokenKindTilde
		if lexer.char == '=' {
			token.kind = TokenKindTildeAssignment
			err = lexer.nextRune()
			token.location.SetWidth(2)
		}
		lexer.addToken(token)

	case '=':
		token := lexer.newToken()
		err = lexer.nextRune()
		if err != nil { return }
		token.kind = TokenKindAssignment
		if lexer.char == '=' {
			token.kind = TokenKindEqualTo
			err = lexer.nextRune()
			token.location.SetWidth(2)
		}
		lexer.addToken(token)

	case '<':
		token := lexer.newToken()
		err = lexer.nextRune()
		if err != nil { return }
		token.kind = TokenKindLessThan
		if lexer.char == '<' {
			token.kind = TokenKindLShift
			err = lexer.nextRune()
			token.location.SetWidth(2)
			if lexer.char == '=' {
				token.kind = TokenKindLShiftAssignment
				err = lexer.nextRune()
				token.location.SetWidth(3)
			}
		} else if lexer.char == '=' {
			token.kind = TokenKindLessThanEqualTo
			err = lexer.nextRune()
			token.location.SetWidth(2)
		}
		lexer.addToken(token)

	case '>':
		token := lexer.newToken()
		err = lexer.nextRune()
		if err != nil { return }
		token.kind = TokenKindGreaterThan
		if lexer.char == '>' {
			token.kind = TokenKindRShift
			err = lexer.nextRune()
			token.location.SetWidth(2)
			if lexer.char == '=' {
				token.kind = TokenKindRShiftAssignment
				err = lexer.nextRune()
				token.location.SetWidth(3)
			}
		} else if lexer.char == '=' {
			token.kind = TokenKindGreaterThanEqualTo
			err = lexer.nextRune()
			token.location.SetWidth(2)
		}
		lexer.addToken(token)

	case '|':
		token := lexer.newToken()
		err = lexer.nextRune()
		if err != nil { return }
		token.kind = TokenKindBinaryOr
		if lexer.char == '|' {
			token.kind = TokenKindLogicalOr
			err = lexer.nextRune()
			token.location.SetWidth(2)
		} else if lexer.char == '=' {
			token.kind = TokenKindBinaryOrAssignment
			err = lexer.nextRune()
			token.location.SetWidth(2)
		}
		lexer.addToken(token)

	case '&':
		token := lexer.newToken()
		err = lexer.nextRune()
		if err != nil { return }
		token.kind = TokenKindBinaryAnd
		if lexer.char == '&' {
			token.kind = TokenKindLogicalAnd
			err = lexer.nextRune()
			token.location.SetWidth(2)
		} else if lexer.char == '=' {
			token.kind = TokenKindBinaryAndAssignment
			err = lexer.nextRune()
			token.location.SetWidth(2)
		}
		lexer.addToken(token)

	case '^':
		token := lexer.newToken()
		err = lexer.nextRune()
		if err != nil { return }
		token.kind = TokenKindBinaryXor
		if lexer.char == '=' {
			token.kind = TokenKindBinaryXorAssignment
			err = lexer.nextRune()
			token.location.SetWidth(2)
		}
		lexer.addToken(token)

	default:
		err = infoerr.NewError (
			lexer.file.Location(1),
			"unexpected symbol character " +
			string(lexer.char),
			infoerr.ErrorKindError)
		return
	}

	return
}
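
// tokenizeDashBeginning lexes a token that begins with a dash. Depending on
// what follows, this produces a decrement (--), separator (---), return
// direction (->), negative number, or minus token.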
func (lexer *lexingOperation) tokenizeDashBeginning () (err error) {
	token := lexer.newToken()
	err = lexer.nextRune()
	if err != nil { return }

	if lexer.char == '-' {
		token.kind = TokenKindDecrement
		token.location.SetWidth(2)

		err = lexer.nextRune()
		if err != nil { return }

		if lexer.char == '-' {
			token.kind = TokenKindSeparator
			err = lexer.nextRune()
			token.location.SetWidth(3)
		}
		lexer.addToken(token)
	} else if lexer.char == '>' {
		token.kind = TokenKindReturnDirection
		token.location.SetWidth(2)

		err = lexer.nextRune()
		if err != nil { return }

		lexer.addToken(token)
	} else if lexer.char >= '0' && lexer.char <= '9' {
		err = lexer.tokenizeNumberBeginning(true)
	} else {
		token.kind = TokenKindMinus
		lexer.addToken(token)
	}

	return
}

// newToken creates a new token from the lexer's current position in the file.
func (lexer *lexingOperation) newToken () (token Token) {
	return Token { location: lexer.file.Location(1) }
}

// addToken adds a new token to the lexer's token slice.
func (lexer *lexingOperation) addToken (token Token) {
	lexer.tokens = append(lexer.tokens, token)
}

// skipSpaces skips all space characters (not tabs or newlines).
func (lexer *lexingOperation) skipSpaces () (err error) {
	for lexer.char == ' ' {
		err = lexer.nextRune()
		if err != nil { return }
	}

	return
}

// nextRune advances the lexer to the next rune in the file.
func (lexer *lexingOperation) nextRune () (err error) {
	lexer.char, _, err = lexer.file.ReadRune()
	if err != nil && err != io.EOF {
		return infoerr.NewError (
			lexer.file.Location(1),
			err.Error(), infoerr.ErrorKindError)
	}

	return
}