Name and permission tokens are now supported

Sasha Koshka 2022-08-10 14:55:26 -04:00
parent 9c1baf8216
commit 2220b95cd2
2 changed files with 84 additions and 28 deletions

View File

@@ -3,6 +3,7 @@ package lexer
 import "io"
 import "fmt"
 import "github.com/sashakoshka/arf/file"
+import "github.com/sashakoshka/arf/types"
 
 // LexingOperation holds information about an ongoing lexing operation.
 type LexingOperation struct {
@@ -32,6 +33,8 @@ func (lexer *LexingOperation) tokenize () (err error) {
 	if err != nil { return }
 
 	for {
+		fmt.Println(string(lexer.char))
+
 		lowercase := lexer.char >= 'a' && lexer.char <= 'z'
 		uppercase := lexer.char >= 'A' && lexer.char <= 'Z'
 		number := lexer.char >= '0' && lexer.char <= '9'
@@ -41,7 +44,8 @@ func (lexer *LexingOperation) tokenize () (err error) {
 			lexer.nextRune()
 		} else if lowercase || uppercase {
 			// TODO: tokenize alpha begin
-			lexer.nextRune()
+			err = lexer.tokenizeAlphaBeginning()
+			if err != nil { return }
 		} else {
 			err = lexer.tokenizeSymbolBeginning()
 			if err != nil { return }
@@ -54,13 +58,44 @@ func (lexer *LexingOperation) tokenize () (err error) {
 	return
 }
 
+func (lexer *LexingOperation) tokenizeAlphaBeginning () (err error) {
+	got := ""
+
+	for {
+		lowercase := lexer.char >= 'a' && lexer.char <= 'z'
+		uppercase := lexer.char >= 'A' && lexer.char <= 'Z'
+		number := lexer.char >= '0' && lexer.char <= '9'
+
+		if !lowercase && !uppercase && !number { break }
+
+		got += string(lexer.char)
+
+		lexer.nextRune()
+	}
+
+	token := Token { kind: TokenKindName, value: got }
+
+	if len(got) == 2 {
+		firstValid := got[0] == 'n' || got[0] == 'r' || got[0] == 'w'
+		secondValid := got[1] == 'n' || got[1] == 'r' || got[1] == 'w'
+
+		if firstValid && secondValid {
+			token.kind = TokenKindPermission
+			token.value = types.PermissionFrom(got)
+		}
+	}
+
+	lexer.addToken(token)
+	return
+}
+
 func (lexer *LexingOperation) tokenizeSymbolBeginning () (err error) {
-	fmt.Println(string(lexer.char))
 	switch lexer.char {
 	case '#':
 		// comment
 		for lexer.char != '\n' {
-			lexer.nextRune()
+			err = lexer.nextRune()
+			if err != nil { return }
 		}
 	case '\t':
 		// indent level
@@ -80,7 +115,8 @@ func (lexer *LexingOperation) tokenizeSymbolBeginning () (err error) {
 			lexer.addToken (Token {
 				kind: TokenKindIndent,
 			})
-			lexer.nextRune()
+			err = lexer.nextRune()
+			if err != nil { return }
 		}
 	case '\n':
 		// line break
@@ -88,94 +124,94 @@ func (lexer *LexingOperation) tokenizeSymbolBeginning () (err error) {
 		lexer.addToken (Token {
 			kind: TokenKindNewline,
 		})
-		lexer.nextRune()
+		err = lexer.nextRune()
 	case '"':
 		// TODO: tokenize string literal
-		lexer.nextRune()
+		err = lexer.nextRune()
 	case '\'':
 		// TODO: tokenize rune literal
-		lexer.nextRune()
+		err = lexer.nextRune()
 	case ':':
 		lexer.addToken (Token {
 			kind: TokenKindColon,
 		})
-		lexer.nextRune()
+		err = lexer.nextRune()
 	case '.':
 		lexer.addToken (Token {
 			kind: TokenKindDot,
 		})
-		lexer.nextRune()
+		err = lexer.nextRune()
 	case '[':
 		lexer.addToken (Token {
 			kind: TokenKindLBracket,
 		})
-		lexer.nextRune()
+		err = lexer.nextRune()
 	case ']':
 		lexer.addToken (Token {
 			kind: TokenKindRBracket,
 		})
-		lexer.nextRune()
+		err = lexer.nextRune()
 	case '{':
 		lexer.addToken (Token {
 			kind: TokenKindLBrace,
 		})
-		lexer.nextRune()
+		err = lexer.nextRune()
 	case '}':
 		lexer.addToken (Token {
 			kind: TokenKindRBrace,
 		})
-		lexer.nextRune()
+		err = lexer.nextRune()
 	case '+':
 		// TODO: tokenize plus begin
-		lexer.nextRune()
+		err = lexer.nextRune()
 	case '-':
-		lexer.tokenizeDashBeginning()
+		err = lexer.tokenizeDashBeginning()
 	case '*':
 		lexer.addToken (Token {
 			kind: TokenKindAsterisk,
 		})
-		lexer.nextRune()
+		err = lexer.nextRune()
 	case '/':
 		lexer.addToken (Token {
 			kind: TokenKindSlash,
 		})
-		lexer.nextRune()
+		err = lexer.nextRune()
 	case '@':
 		lexer.addToken (Token {
 			kind: TokenKindAt,
 		})
-		lexer.nextRune()
+		err = lexer.nextRune()
 	case '!':
 		lexer.addToken (Token {
 			kind: TokenKindExclamation,
 		})
-		lexer.nextRune()
+		err = lexer.nextRune()
 	case '%':
 		lexer.addToken (Token {
 			kind: TokenKindPercent,
 		})
-		lexer.nextRune()
+		err = lexer.nextRune()
 	case '~':
 		lexer.addToken (Token {
 			kind: TokenKindTilde,
 		})
-		lexer.nextRune()
+		err = lexer.nextRune()
 	case '<':
 		// TODO: tokenize less than begin
-		lexer.nextRune()
+		err = lexer.nextRune()
 	case '>':
 		// TODO: tokenize greater than begin
-		lexer.nextRune()
+		err = lexer.nextRune()
 	case '|':
 		// TODO: tokenize bar begin
-		lexer.nextRune()
+		err = lexer.nextRune()
 	case '&':
 		// TODO: tokenize and begin
-		lexer.nextRune()
+		err = lexer.nextRune()
 	default:
 		err = file.NewError (
 			lexer.file.Location(), 1,
-			"unexpected character " +
+			"unexpected symbol character " +
 			string(lexer.char),
 			file.ErrorKindError)
 		return
@@ -200,6 +236,8 @@ func (lexer *LexingOperation) tokenizeDashBeginning () (err error) {
 		token.kind = TokenKindSeparator
 		lexer.nextRune()
 	}
 
+	lexer.addToken(token)
+
 	return
 }
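
Taken together, the lexer changes mean any run of a-z, A-Z, 0-9 starting with a letter becomes a name token, and a two-character run drawn only from 'n', 'r', 'w' is re-classified as a permission token. A minimal stand-alone sketch of that classification rule, for illustration only — classify is a hypothetical helper, not part of the lexer's API:

package main

import "fmt"

// classify mimics the decision tokenizeAlphaBeginning makes once a
// complete alphanumeric word has been consumed: two-letter words made
// of 'n', 'r', 'w' are permissions, everything else is a name.
func classify(word string) string {
	if len(word) == 2 {
		valid := func(c byte) bool { return c == 'n' || c == 'r' || c == 'w' }
		if valid(word[0]) && valid(word[1]) {
			return "permission"
		}
	}
	return "name"
}

func main() {
	fmt.Println(classify("rw"))    // permission
	fmt.Println(classify("nn"))    // permission
	fmt.Println(classify("rwn"))   // name (three characters, so not a permission)
	fmt.Println(classify("hello")) // name
}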

View File

@@ -3,12 +3,30 @@ package types
 type Mode int
 
 const (
-	ModeRead = iota
+	ModeNone = iota
+	ModeRead
 	ModeWrite
-	ModeNone
 )
 
 type Permission struct {
 	Internal Mode
 	External Mode
 }
+
+func ModeFrom (char rune) (mode Mode) {
+	switch (char) {
+	case 'n': mode = ModeNone
+	case 'r': mode = ModeRead
+	case 'w': mode = ModeWrite
+	}
+
+	return
+}
+
+func PermissionFrom (data string) (permission Permission) {
+	if len(data) != 2 { return }
+
+	permission.Internal = ModeFrom(rune(data[0]))
+	permission.External = ModeFrom(rune(data[1]))
+	return
+}
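
A permission string maps each character to a Mode: the first character sets Internal access, the second External. Reordering the constants so ModeNone comes first makes "no access" the zero value, which is also what PermissionFrom yields for malformed input, since it returns early on any string that is not exactly two characters. A usage sketch, assuming the package is importable at the path the lexer now imports and that 'n' maps to ModeNone and 'r' to ModeRead as in the switch above:

package main

import (
	"fmt"

	"github.com/sashakoshka/arf/types"
)

func main() {
	// "rw" -> Internal: ModeRead, External: ModeWrite.
	perm := types.PermissionFrom("rw")
	fmt.Println(perm.Internal == types.ModeRead)  // true
	fmt.Println(perm.External == types.ModeWrite) // true

	// Any string that is not exactly two characters returns the zero
	// Permission, which now reads as {ModeNone, ModeNone}.
	fmt.Println(types.PermissionFrom("rwn")) // {0 0}
}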