// Commit fc43c2ce7d (Leon Mika):
// - Modified long var interpolation to support dot lookups
// - Added a time:from-unix function and added time.Time as an object
package ucl
import (
	"io"
	"strings"

	"github.com/alecthomas/participle/v2"
	"github.com/alecthomas/participle/v2/lexer"
)
// astStringStringSpan is a run of literal characters inside a
// single-quoted string (the SingleString lexer state performs no
// escape handling or interpolation).
type astStringStringSpan struct {
	Pos lexer.Position

	Chars *string `parser:"@SingleChar"`
}
// astLongIdentDotSuffix is one ".suffix" segment of a ${...} long
// reference: either a plain key name, or a parenthesised sub-pipeline.
type astLongIdentDotSuffix struct {
	KeyName  *string      `parser:"@LIIdent"`
	Pipeline *astPipeline `parser:"| LILp @@ RP"`
}
// astLongIdent is the body of a ${...} interpolation inside a
// double-quoted string: a variable name optionally followed by
// dot-separated lookup segments.
type astLongIdent struct {
	Pos lexer.Position

	VarName   string                  `parser:"@LIIdent"`
	DotSuffix []astLongIdentDotSuffix `parser:"( LIDot @@ )*"`
}
// astDoubleStringSpan is one segment of a double-quoted string: plain
// characters, a backslash escape, a short $var reference, a ${...}
// long reference (with dot lookups), or a $(...) sub-expression.
type astDoubleStringSpan struct {
	Pos lexer.Position

	Chars        *string       `parser:"@Char"`
	Escaped      *string       `parser:"| @Escaped"`
	IdentRef     *string       `parser:"| @IdentRef"`
	LongIdentRef *astLongIdent `parser:"| LongIdentRef @@ LIEnd"`
	SubExpr      *astPipeline  `parser:"| StartSubExpr @@ RP"`
}
// astDoubleString is a double-quoted (interpolating) string literal,
// made up of zero or more spans between the opening and closing quote.
type astDoubleString struct {
	Spans []astDoubleStringSpan `parser:"StringStart @@* StringEnd"`
}
// astSingleString is a single-quoted (raw) string literal; its spans
// are taken verbatim with no interpolation.
type astSingleString struct {
	Spans []astStringStringSpan `parser:"SingleStringStart @@* SingleStringEnd"`
}
// astLiteral is a literal argument value: a double-quoted string, a
// single-quoted string, or an integer. Exactly one field is set.
type astLiteral struct {
	StrInter       *astDoubleString `parser:"@@"`
	SingleStrInter *astSingleString `parser:"| @@"`
	Int            *int             `parser:"| @Int"`
}
// astIdentNames is a command identifier that may be namespaced with
// colon-separated parts (e.g. "time:from-unix").
type astIdentNames struct {
	Ident      string   `parser:"@Ident"`
	ColonParts []string `parser:"( COLON @Ident )*"`
}

// String reassembles the full identifier by joining the leading name
// and any colon-separated parts with ':', reversing the parser's split.
func (ai *astIdentNames) String() string {
	segments := make([]string, 0, len(ai.ColonParts)+1)
	segments = append(segments, ai.Ident)
	segments = append(segments, ai.ColonParts...)
	return strings.Join(segments, ":")
}
// astElementPair is one element of a [...] literal: a bare value for a
// list element, or "key: value" for a hash entry (Right is nil for a
// bare list element).
type astElementPair struct {
	Left  astDot  `parser:"@@"`
	Right *astDot `parser:"( COLON @@ )? NL?"`
}
// astListOrHash is a [...] literal: "[]" is an empty list, "[:]" an
// empty hash, and otherwise a sequence of elements/pairs. Whether it
// evaluates as list or hash depends on the element pairs.
type astListOrHash struct {
	EmptyList bool `parser:"@(LS RS)"`
	EmptyHash bool `parser:"| @(LS COLON RS)"`
	// NOTE(review): "@@+ @@*" looks redundant — "@@+" alone already
	// matches one or more elements; confirm the extra "@@*" is intentional.
	Elements []*astElementPair `parser:"| LS NL? @@+ @@* RS"`
}
// astBlock is a { ... } block literal, optionally opening with a
// |name1 name2| parameter list before the statement body.
type astBlock struct {
	Names      []string         `parser:"LC NL* (PIPE @Ident+ PIPE NL*)?"`
	Statements []*astStatements `parser:"@@? NL* RC"`
}
// astMaybeSub is an optional pipeline, used for the possibly-empty
// body of a parenthesised ( ... ) sub-expression.
type astMaybeSub struct {
	Sub *astPipeline `parser:"@@?"`
}
// astCmdArg is a single command argument: a literal, a bare
// identifier, a $var reference, a parenthesised (possibly empty)
// sub-expression, a list/hash literal, or a block. Exactly one field
// is set.
type astCmdArg struct {
	Literal    *astLiteral    `parser:"@@"`
	Ident      *astIdentNames `parser:"| @@"`
	Var        *string        `parser:"| DOLLAR @Ident"`
	MaybeSub   *astMaybeSub   `parser:"| LP @@ RP"`
	ListOrHash *astListOrHash `parser:"| @@"`
	Block      *astBlock      `parser:"| @@"`
}
// astDotSuffix is one ".suffix" lookup segment of a dotted expression:
// an identifier key, or a parenthesised pipeline (presumably evaluated
// to produce the lookup key — confirm against the evaluator).
type astDotSuffix struct {
	KeyIdent *astIdentNames `parser:"@@"`
	Pipeline *astPipeline   `parser:"| LP @@ RP"`
}
// astDot is a command argument with zero or more dot lookups applied,
// e.g. arg.key or arg.(pipeline).
type astDot struct {
	Arg       astCmdArg      `parser:"@@"`
	DotSuffix []astDotSuffix `parser:"( DOT @@ )*"`
}
// astCmd is a single command invocation: a name expression followed by
// zero or more argument expressions.
type astCmd struct {
	Pos lexer.Position

	Name astDot   `parser:"@@"`
	Args []astDot `parser:"@@*"`
}
// astPipeline is one or more commands chained together with "|".
type astPipeline struct {
	First *astCmd   `parser:"@@"`
	Rest  []*astCmd `parser:"( PIPE @@ )*"`
}
// astStatements is a sequence of pipelines separated by one or more NL
// tokens (newlines and/or semicolons).
type astStatements struct {
	First *astPipeline   `parser:"@@"`
	Rest  []*astPipeline `parser:"( NL+ @@ )*"` // TODO: also add support for newlines
}
// astScript is the root of the AST: an optional statement list,
// tolerating leading and trailing blank lines.
type astScript struct {
	Statements *astStatements `parser:"NL* (@@ NL*)?"`
}
// scanner is the stateful lexer for UCL source. "Root" lexes ordinary
// code; a double quote pushes the "String" state and a single quote
// the "SingleString" state. Inside a double-quoted string, "${" pushes
// "LongIdent" for long variable references and "$(" re-enters "Root"
// for sub-expressions. Rule order within a state matters: earlier
// rules win.
var scanner = lexer.MustStateful(lexer.Rules{
	"Root": {
		{"Whitespace", `[ \t]+`, nil},
		// NOTE(review): the trailing \s* also consumes the newline(s)
		// after a "#" comment, so no NL token is emitted for that line —
		// confirm statement separation around comments is intended.
		{"Comment", `[#].*\s*`, nil},
		{"StringStart", `"`, lexer.Push("String")},
		{"SingleStringStart", `'`, lexer.Push("SingleString")},
		{"Int", `[-]?[0-9][0-9]*`, nil},
		{"DOLLAR", `\$`, nil},
		{"COLON", `\:`, nil},
		{"DOT", `[.]`, nil},
		// "(" pushes a fresh Root state so the matching ")" pops back to
		// whichever state opened it (Root, String, or LongIdent).
		{"LP", `\(`, lexer.Push("Root")},
		{"RP", `\)`, lexer.Pop()},
		{"LS", `\[`, nil},
		{"RS", `\]`, nil},
		{"LC", `\{`, nil},
		{"RC", `\}`, nil},
		// A run of newlines/";" (with interleaved blanks) collapses into
		// a single NL token.
		{"NL", `[;\n][; \n\t]*`, nil},
		{"PIPE", `\|`, nil},
		{"Ident", `[-]*[a-zA-Z_][\w-]*`, nil},
	},
	// Interior of a double-quoted string: escapes and interpolation are
	// recognised here.
	"String": {
		{"Escaped", `\\.`, nil},
		{"StringEnd", `"`, lexer.Pop()},
		{"IdentRef", `\$[-]*[a-zA-Z_][\w-]*`, nil},
		{"LongIdentRef", `\$[{]`, lexer.Push("LongIdent")},
		{"StartSubExpr", `\$[(]`, lexer.Push("Root")},
		{"Char", `[^$"\\]+`, nil},
	},
	// Interior of a ${...} long variable reference.
	"LongIdent": {
		{"LIIdent", `[-]*[a-zA-Z_][\w-]*`, nil},
		{"LIDot", `[.]`, nil},
		{"LILp", `\(`, lexer.Push("Root")},
		{"LIEnd", `\}`, lexer.Pop()},
	},
	// Interior of a single-quoted string: no escapes, no interpolation.
	"SingleString": {
		{"SingleStringEnd", `'`, lexer.Pop()},
		{"SingleChar", `[^']+`, nil},
	},
})
// parser is the participle parser for complete UCL scripts; whitespace
// and comment tokens are lexed but elided before parsing.
var parser = participle.MustBuild[astScript](participle.Lexer(scanner),
	participle.Elide("Whitespace", "Comment"))
// parse reads a complete UCL script from r and returns its AST, or a
// parse/lex error.
// NOTE(review): the source name reported in errors is hard-coded to
// "test" — consider threading the real filename through.
func parse(r io.Reader) (*astScript, error) {
	return parser.Parse("test", r)
}