diff --git a/parser/argument.go b/parser/argument.go
index 2f83ecc..75716ce 100644
--- a/parser/argument.go
+++ b/parser/argument.go
@@ -16,7 +16,7 @@ var validArgumentStartTokens = []lexer.TokenKind {
 	lexer.TokenKindLParen,
 }
 
-func (parser *ParsingOperation) parseArgument () (argument Argument, err error) {
+func (parser *parsingOperation) parseArgument () (argument Argument, err error) {
 	argument.location = parser.token.Location()
 
 	err = parser.expect(validArgumentStartTokens...)
diff --git a/parser/body.go b/parser/body.go
index ba29ab9..eb703b0 100644
--- a/parser/body.go
+++ b/parser/body.go
@@ -4,7 +4,7 @@ import "git.tebibyte.media/arf/arf/lexer"
 import "git.tebibyte.media/arf/arf/infoerr"
 
 // parse body parses the body of an arf file, after the metadata header.
-func (parser *ParsingOperation) parseBody () (err error) {
+func (parser *parsingOperation) parseBody () (err error) {
 	for {
 		err = parser.expect(lexer.TokenKindName)
 		if err != nil { return }
diff --git a/parser/data.go b/parser/data.go
index 0b0f0f3..7a4f3f8 100644
--- a/parser/data.go
+++ b/parser/data.go
@@ -4,7 +4,7 @@ import "git.tebibyte.media/arf/arf/types"
 import "git.tebibyte.media/arf/arf/lexer"
 
 // parseData parses a data section.
-func (parser *ParsingOperation) parseDataSection () (
+func (parser *parsingOperation) parseDataSection () (
 	section DataSection,
 	err error,
 ) {
diff --git a/parser/dereference.go b/parser/dereference.go
index 3d8d550..6f33cfa 100644
--- a/parser/dereference.go
+++ b/parser/dereference.go
@@ -2,7 +2,7 @@ package parser
 
 import "git.tebibyte.media/arf/arf/lexer"
 
-func (parser *ParsingOperation) parseDereference () (
+func (parser *parsingOperation) parseDereference () (
 	dereference Dereference,
 	err error,
 ) {
diff --git a/parser/enum.go b/parser/enum.go
index 4460941..93a37f4 100644
--- a/parser/enum.go
+++ b/parser/enum.go
@@ -5,7 +5,7 @@ import "git.tebibyte.media/arf/arf/lexer"
 import "git.tebibyte.media/arf/arf/infoerr"
 
 // parseEnumSection parses an enumerated type section.
-func (parser *ParsingOperation) parseEnumSection () (
+func (parser *parsingOperation) parseEnumSection () (
 	section EnumSection,
 	err error,
 ) {
@@ -51,7 +51,7 @@ func (parser *ParsingOperation) parseEnumSection () (
 // parseEnumMembers parses a list of members for an enum section. Indentation
 // level is assumed.
-func (parser *ParsingOperation) parseEnumMembers (
+func (parser *parsingOperation) parseEnumMembers (
 	into *EnumSection,
 ) (
 	err error,
 ) {
@@ -70,7 +70,7 @@ func (parser *ParsingOperation) parseEnumMembers (
 }
 
 // parseEnumMember parses a single enum member. Indenttion level is assumed.
-func (parser *ParsingOperation) parseEnumMember () (
+func (parser *parsingOperation) parseEnumMember () (
 	member EnumMember,
 	err error,
 ) {
diff --git a/parser/face.go b/parser/face.go
index 80368e7..bc065db 100644
--- a/parser/face.go
+++ b/parser/face.go
@@ -5,7 +5,7 @@ import "git.tebibyte.media/arf/arf/lexer"
 import "git.tebibyte.media/arf/arf/infoerr"
 
 // parseFaceSection parses an interface section.
-func (parser *ParsingOperation) parseFaceSection () (
+func (parser *parsingOperation) parseFaceSection () (
 	section FaceSection,
 	err error,
 ) {
@@ -65,7 +65,7 @@ func (parser *ParsingOperation) parseFaceSection () (
 
 // parseFaceBehaviors parses a list of interface behaviors for an object
 // interface.
-func (parser *ParsingOperation) parseFaceBehaviors () (
+func (parser *parsingOperation) parseFaceBehaviors () (
 	behaviors map[string] FaceBehavior,
 	err error,
 ) {
@@ -101,7 +101,7 @@ func (parser *ParsingOperation) parseFaceBehaviors () (
 }
 
 // parseFaceBehavior parses a single interface behavior.
-func (parser *ParsingOperation) parseFaceBehavior (
+func (parser *parsingOperation) parseFaceBehavior (
 	indent int,
 ) (
 	behavior FaceBehavior,
@@ -125,7 +125,7 @@ func (parser *ParsingOperation) parseFaceBehavior (
 	return
 }
 
-func (parser *ParsingOperation) parseFaceBehaviorArguments (
+func (parser *parsingOperation) parseFaceBehaviorArguments (
 	indent int,
 ) (
 	inputs []Declaration,
diff --git a/parser/func.go b/parser/func.go
index d7f9405..c114ccd 100644
--- a/parser/func.go
+++ b/parser/func.go
@@ -5,7 +5,7 @@ import "git.tebibyte.media/arf/arf/lexer"
 import "git.tebibyte.media/arf/arf/infoerr"
 
 // parseFunc parses a function section.
-func (parser *ParsingOperation) parseFuncSection () (
+func (parser *parsingOperation) parseFuncSection () (
 	section FuncSection,
 	err error,
 ) {
@@ -75,7 +75,7 @@ func (parser *ParsingOperation) parseFuncSection () (
 
 // parseFuncArguments parses a function's inputs, outputs, and reciever if that
 // exists.
-func (parser *ParsingOperation) parseFuncArguments (
+func (parser *parsingOperation) parseFuncArguments (
 	into *FuncSection,
 ) (
 	err error,
diff --git a/parser/list.go b/parser/list.go
index 3be30f9..e6860b4 100644
--- a/parser/list.go
+++ b/parser/list.go
@@ -3,7 +3,7 @@ package parser
 import "git.tebibyte.media/arf/arf/lexer"
 
 // parseList parses a parenthetically delimited list of arguments.
-func (parser *ParsingOperation) parseList () (list List, err error) {
+func (parser *parsingOperation) parseList () (list List, err error) {
 	list.location = parser.token.Location()
 
 	err = parser.expect(lexer.TokenKindLParen)
diff --git a/parser/meta.go b/parser/meta.go
index 6d15a2b..c4373d6 100644
--- a/parser/meta.go
+++ b/parser/meta.go
@@ -6,7 +6,7 @@ import "git.tebibyte.media/arf/arf/lexer"
 import "git.tebibyte.media/arf/arf/infoerr"
 
 // parseMeta parsese the metadata header at the top of an arf file.
-func (parser *ParsingOperation) parseMeta () (err error) {
+func (parser *parsingOperation) parseMeta () (err error) {
 	cwd, _ := os.Getwd()
 
 	for {
diff --git a/parser/misc.go b/parser/misc.go
index ebe2e00..4253bb8 100644
--- a/parser/misc.go
+++ b/parser/misc.go
@@ -3,7 +3,7 @@ package parser
 import "git.tebibyte.media/arf/arf/lexer"
 
 // parseIdentifier parses an identifier made out of dot separated names.
-func (parser *ParsingOperation) parseIdentifier () (
+func (parser *parsingOperation) parseIdentifier () (
 	identifier Identifier,
 	err error,
 ) {
diff --git a/parser/parser.go b/parser/parser.go
index 1e54f3f..ceb31d4 100644
--- a/parser/parser.go
+++ b/parser/parser.go
@@ -7,8 +7,8 @@ import "git.tebibyte.media/arf/arf/file"
 import "git.tebibyte.media/arf/arf/lexer"
 import "git.tebibyte.media/arf/arf/infoerr"
 
-// ParsingOperation holds information about an ongoing parsing operation.
-type ParsingOperation struct {
+// parsingOperation holds information about an ongoing parsing operation.
+type parsingOperation struct {
 	modulePath string
 	token      lexer.Token
 	tokens     []lexer.Token
@@ -34,7 +34,7 @@ func Fetch (modulePath string, skim bool) (tree SyntaxTree, err error) {
 	}
 
 	// miss, so parse the module.
-	parser := ParsingOperation {
+	parser := parsingOperation {
 		modulePath: modulePath,
 		skimming:   skim,
 		tree: SyntaxTree {
@@ -76,7 +76,7 @@ func Fetch (modulePath string, skim bool) (tree SyntaxTree, err error) {
 }
 
 // parse parses a file and adds it to the syntax tree.
-func (parser *ParsingOperation) parse (sourceFile *file.File) (err error) {
+func (parser *parsingOperation) parse (sourceFile *file.File) (err error) {
 	var tokens []lexer.Token
 	tokens, err = lexer.Tokenize(sourceFile)
 	if err != nil { return }
@@ -99,7 +99,7 @@ func (parser *ParsingOperation) parse (sourceFile *file.File) (err error) {
 // expect takes in a list of allowed token kinds, and returns an error if the
 // current token isn't one of them. If the length of allowed is zero, this
 // function will not return an error.
-func (parser *ParsingOperation) expect (allowed ...lexer.TokenKind) (err error) {
+func (parser *parsingOperation) expect (allowed ...lexer.TokenKind) (err error) {
 	if len(allowed) == 0 { return }
 
 	for _, kind := range allowed {
@@ -129,7 +129,7 @@ func (parser *ParsingOperation) expect (allowed ...lexer.TokenKind) (err error)
 }
 
 // nextToken is the same as expect, but it advances to the next token first.
-func (parser *ParsingOperation) nextToken (allowed ...lexer.TokenKind) (err error) {
+func (parser *parsingOperation) nextToken (allowed ...lexer.TokenKind) (err error) {
 	parser.tokenIndex ++
 	if parser.tokenIndex >= len(parser.tokens) { return io.EOF }
 	parser.token = parser.tokens[parser.tokenIndex]
@@ -140,7 +140,7 @@ func (parser *ParsingOperation) nextToken (allowed ...lexer.TokenKind) (err erro
 
 // previousToken goes back one token. If the parser is already at the beginning,
 // this does nothing.
-func (parser *ParsingOperation) previousToken () {
+func (parser *parsingOperation) previousToken () {
 	parser.tokenIndex --
 	if parser.tokenIndex < 0 { parser.tokenIndex = 0 }
 	parser.token = parser.tokens[parser.tokenIndex]
@@ -149,7 +149,7 @@ func (parser *ParsingOperation) previousToken () {
 
 // skipIndentLevel advances the parser, ignoring every line with an indentation
 // equal to or greater than the specified indent.
-func (parser *ParsingOperation) skipIndentLevel (indent int) (err error) {
+func (parser *parsingOperation) skipIndentLevel (indent int) (err error) {
 	braceLevel   := 0
 	parenLevel   := 0
 	bracketLevel := 0
@@ -187,7 +187,7 @@ func (parser *ParsingOperation) skipIndentLevel (indent int) (err error) {
 }
 
 // skipWhitespace skips over newlines and indent tokens.
-func (parser *ParsingOperation) skipWhitespace () (err error) {
+func (parser *parsingOperation) skipWhitespace () (err error) {
 	for {
 		isWhitespace :=
 			parser.token.Is(lexer.TokenKindIndent) ||
diff --git a/parser/phrase.go b/parser/phrase.go
index da04024..8bab1e7 100644
--- a/parser/phrase.go
+++ b/parser/phrase.go
@@ -87,7 +87,7 @@ var controlFlowKinds = []PhraseKind {
 }
 
 // parseBlock parses an indented block of phrases
-func (parser *ParsingOperation) parseBlock (
+func (parser *parsingOperation) parseBlock (
 	indent int,
 ) (
 	block Block,
@@ -108,7 +108,7 @@ func (parser *ParsingOperation) parseBlock (
 // parseBlockLevelPhrase parses a phrase that is not being used as an argument
 // to something else. This method is allowed to do things like parse return
 // directions, and indented blocks beneath the phrase.
-func (parser *ParsingOperation) parseBlockLevelPhrase (
+func (parser *parsingOperation) parseBlockLevelPhrase (
 	indent int,
 ) (
 	phrase Phrase,
@@ -226,7 +226,7 @@ func (parser *ParsingOperation) parseBlockLevelPhrase (
 // parseArgumentLevelPhrase parses a phrase that is being used as an argument to
 // something. It is forbidden from using return direction, and it must be
 // delimited by brackets.
-func (parser *ParsingOperation) parseArgumentLevelPhrase () (
+func (parser *parsingOperation) parseArgumentLevelPhrase () (
 	phrase Phrase,
 	err error,
 ) {
@@ -277,7 +277,7 @@ func (parser *ParsingOperation) parseArgumentLevelPhrase () (
 }
 
 // parsePhraseCommand parses the command argument of a phrase.
-func (parser *ParsingOperation) parsePhraseCommand () (
+func (parser *parsingOperation) parsePhraseCommand () (
 	command  Argument,
 	kind     PhraseKind,
 	operator lexer.TokenKind,
diff --git a/parser/type-notation.go b/parser/type-notation.go
index a03fef3..2c5d6cc 100644
--- a/parser/type-notation.go
+++ b/parser/type-notation.go
@@ -4,7 +4,7 @@ import "git.tebibyte.media/arf/arf/lexer"
 import "git.tebibyte.media/arf/arf/infoerr"
 
 // parseType parses a type notation of the form Name, {Name}, etc.
-func (parser *ParsingOperation) parseType () (what Type, err error) {
+func (parser *parsingOperation) parseType () (what Type, err error) {
 	err = parser.expect(lexer.TokenKindName, lexer.TokenKindLBrace)
 	if err != nil { return }
 	what.location = parser.token.Location()
diff --git a/parser/type.go b/parser/type.go
index fae0dd0..2aeed40 100644
--- a/parser/type.go
+++ b/parser/type.go
@@ -5,7 +5,7 @@ import "git.tebibyte.media/arf/arf/lexer"
 
 // parseTypeSection parses a type definition. It can inherit from other types,
 // and define new members on them.
-func (parser *ParsingOperation) parseTypeSection () (
+func (parser *parsingOperation) parseTypeSection () (
 	section TypeSection,
 	err error,
 ) {
@@ -72,7 +72,7 @@ func (parser *ParsingOperation) parseTypeSection () (
 }
 
 // parseTypeSectionMember parses a type section member variable.
-func (parser *ParsingOperation) parseTypeSectionMember () (
+func (parser *parsingOperation) parseTypeSectionMember () (
 	member TypeSectionMember,
 	err error,
 ) {
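
With ParsingOperation unexported, code outside the parser package now reaches the parser only through the exported Fetch function shown in parser.go. A minimal usage sketch (not part of the patch itself); the import path for the parser package is assumed to mirror the lexer and file packages, and the module path passed to Fetch is hypothetical:

package main

import "fmt"
import "git.tebibyte.media/arf/arf/parser" // assumed import path

func main () {
	// Fetch parses (or merely skims, when the second argument is true) the
	// module at the given path and returns its syntax tree. The
	// parsingOperation value it builds stays internal to the package.
	tree, err := parser.Fetch("./examples/hello", false) // hypothetical module path
	if err != nil {
		panic(err)
	}
	fmt.Println(tree)
}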