pax_global_header00006660000000000000000000000064150530036640014514gustar00rootroot0000000000000052 comment=d70698c64070ec56f6e6abb6254775a0ad58c122 golang-github-alecthomas-participle-v2-2.1.4/000077500000000000000000000000001505300366400210445ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/.github/000077500000000000000000000000001505300366400224045ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/.github/FUNDING.yml000066400000000000000000000000251505300366400242160ustar00rootroot00000000000000github: [alecthomas] golang-github-alecthomas-participle-v2-2.1.4/.github/workflows/000077500000000000000000000000001505300366400244415ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/.github/workflows/ci.yml000066400000000000000000000012171505300366400255600ustar00rootroot00000000000000on: push: branches: - master pull_request: name: CI jobs: test: name: Test runs-on: ubuntu-latest steps: - name: Checkout code uses: actions/checkout@v4 - name: Init Hermit run: ./bin/hermit env -r >> $GITHUB_ENV - name: Test Participle run: go test ./... - name: Test Examples run: cd ./_examples && go test ./... lint: name: Lint runs-on: ubuntu-latest steps: - name: Checkout code uses: actions/checkout@v4 - name: Init Hermit run: ./bin/hermit env -r >> $GITHUB_ENV - name: golangci-lint run: golangci-lint run golang-github-alecthomas-participle-v2-2.1.4/.github/workflows/release.yml000066400000000000000000000005111505300366400266010ustar00rootroot00000000000000name: Release on: push: tags: - 'v*' jobs: release: name: Release runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 with: fetch-depth: 0 - run: ./bin/hermit env --raw >> $GITHUB_ENV - run: goreleaser release env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} golang-github-alecthomas-participle-v2-2.1.4/.golangci.yml000066400000000000000000000033331505300366400234320ustar00rootroot00000000000000run: tests: true skip-dirs: - _examples output: print-issued-lines: false linters: enable-all: true disable: - maligned - lll - gocyclo - gochecknoglobals - wsl - whitespace - godox - funlen - gocognit - gomnd - goerr113 - godot - nestif - testpackage - nolintlint - exhaustivestruct - wrapcheck - gci - gofumpt - gocritic - nlreturn - errorlint - nakedret - forbidigo - revive - cyclop - ifshort - paralleltest - interfacer - scopelint - golint - wastedassign - forcetypeassert - gomoddirectives - varnamelen - exhaustruct - ireturn - nonamedreturns - errname - nilnil - maintidx - unused # Does not work with type parameters - dupword - depguard - mnd - recvcheck - perfsprint - predeclared linters-settings: govet: check-shadowing: true gocyclo: min-complexity: 10 dupl: threshold: 100 goconst: min-len: 8 min-occurrences: 3 exhaustive: default-signifies-exhaustive: true issues: max-per-linter: 0 max-same: 0 exclude-use-default: false exclude: # Captured by errcheck. - '^(G104|G204|G307):' # Very commonly not checked. - 'Error return value of .(.*\.Help|.*\.MarkFlagRequired|(os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*printf?|os\.(Un)?Setenv). 
is not checked' - 'exported method `(.*\.MarshalJSON|.*\.UnmarshalJSON|.*\.EntityURN|.*\.GoString|.*\.Pos)` should have comment or be unexported' - 'uses unkeyed fields' - 'declaration of "err" shadows declaration' - 'bad syntax for struct tag key' - 'bad syntax for struct tag pair' - '^ST1012' golang-github-alecthomas-participle-v2-2.1.4/.goreleaser.yml000066400000000000000000000012641505300366400240000ustar00rootroot00000000000000project_name: participle release: github: owner: alecthomas name: participle brews: - install: bin.install "participle" env: - CGO_ENABLED=0 builds: - goos: - linux - darwin - windows goarch: - arm64 - amd64 - "386" goarm: - "6" dir: ./cmd/participle main: . ldflags: -s -w -X main.version={{.Version}} binary: participle archives: - format: tar.gz name_template: '{{ .Binary }}-{{ .Version }}-{{ .Os }}-{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}' files: - COPYING - README* snapshot: name_template: SNAPSHOT-{{ .Commit }} checksum: name_template: '{{ .ProjectName }}-{{ .Version }}-checksums.txt' golang-github-alecthomas-participle-v2-2.1.4/CHANGES.md000066400000000000000000000025701505300366400224420ustar00rootroot00000000000000 - [v2](#v2) ## v2 v2 was released in November 2020. It contains the following changes, some of which are backwards-incompatible: - Added optional `LexString()` and `LexBytes()` methods that lexer definitions can implement to fast-path lexing of bytes and strings. - A new stateful lexer has been added. - A `filename` must now be passed to all `Parse*()` and `Lex*()` methods. - The `text/scanner` lexer no longer automatically unquotes strings or supports arbitrary length single quoted strings. The tokens it produces are identical to those of the `text/scanner` package. Use `Unquote()` to remove quotes. - `Tok` and `EndTok` will no longer be populated. - If a field named `Token []lexer.Token` exists it will be populated with the raw tokens that the node parsed from the lexer. - Support capturing directly into lexer.Token fields. eg. type ast struct { Head lexer.Token `@Ident` Tail []lexer.Token `@(Ident*)` } - Add an `experimental/codegen` for stateful lexers. This provides ~10x performance improvement with zero garbage when lexing strings. - The `regex` lexer has been removed. - The `ebnf` lexer has been removed. - All future work on lexing will be put into the stateful lexer. - The need for `DropToken` has been removed. golang-github-alecthomas-participle-v2-2.1.4/COPYING000066400000000000000000000020441505300366400220770ustar00rootroot00000000000000Copyright (C) 2017-2022 Alec Thomas Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. golang-github-alecthomas-participle-v2-2.1.4/README.md000066400000000000000000000604001505300366400223230ustar00rootroot00000000000000# A dead simple parser package for Go [![PkgGoDev](https://pkg.go.dev/badge/github.com/alecthomas/participle/v2)](https://pkg.go.dev/github.com/alecthomas/participle/v2) [![GHA Build](https://github.com/alecthomas/participle/actions/workflows/ci.yml/badge.svg)](https://github.com/alecthomas/participle/actions) [![Go Report Card](https://goreportcard.com/badge/github.com/alecthomas/participle/v2)](https://goreportcard.com/report/github.com/alecthomas/participle/v2) [![Slack chat](https://img.shields.io/static/v1?logo=slack&style=flat&label=slack&color=green&message=gophers)](https://gophers.slack.com/messages/CN9DS8YF3) - [V2](#v2) - [Introduction](#introduction) - [Tutorial](#tutorial) - [Tag syntax](#tag-syntax) - [Overview](#overview) - [Grammar syntax](#grammar-syntax) - [Capturing](#capturing) - [Capturing boolean value](#capturing-boolean-value) - ["Union" types](#union-types) - [Custom parsing](#custom-parsing) - [Lexing](#lexing) - [Stateful lexer](#stateful-lexer) - [Example stateful lexer](#example-stateful-lexer) - [Example simple/non-stateful lexer](#example-simplenon-stateful-lexer) - [Experimental - code generation](#experimental---code-generation) - [Options](#options) - [Examples](#examples) - [Performance](#performance) - [Concurrency](#concurrency) - [Error reporting](#error-reporting) - [Comments](#comments) - [Limitations](#limitations) - [EBNF](#ebnf) - [Syntax/Railroad Diagrams](#syntaxrailroad-diagrams) ## V2 This is version 2 of Participle. It can be installed with: ```shell $ go get github.com/alecthomas/participle/v2@latest ``` The latest version from v0 can be installed via: ```shell $ go get github.com/alecthomas/participle@latest ``` ## Introduction The goal of this package is to provide a simple, idiomatic and elegant way of defining parsers in Go. Participle's method of defining grammars should be familiar to any Go programmer who has used the `encoding/json` package: struct field tags define what and how input is mapped to those same fields. This is not unusual for Go encoders, but is unusual for a parser. ## Tutorial A [tutorial](TUTORIAL.md) is available, walking through the creation of an .ini parser. ## Tag syntax Participle supports two forms of struct tag grammar syntax. The easiest to read is when the grammar uses the entire struct tag content, eg. ```go Field string `@Ident @("," Ident)*` ``` However, this does not coexist well with other tags such as JSON, etc. and may cause issues with linters. If this is an issue then you can use the `parser:""` tag format. In this case single quotes can be used to quote literals making the tags somewhat easier to write, eg. ```go Field string `parser:"@ident (',' Ident)*" json:"field"` ``` ## Overview A grammar is an annotated Go structure used to both define the parser grammar, and be the AST output by the parser. As an example, following is the final INI parser from the tutorial. 
```go type INI struct { Properties []*Property `@@*` Sections []*Section `@@*` } type Section struct { Identifier string `"[" @Ident "]"` Properties []*Property `@@*` } type Property struct { Key string `@Ident "="` Value *Value `@@` } type Value struct { String *string ` @String` Float *float64 `| @Float` Int *int `| @Int` } ``` > **Note:** Participle also supports named struct tags (eg. Hello string `parser:"@Ident"`). A parser is constructed from a grammar and a lexer: ```go parser, err := participle.Build[INI]() ``` Once constructed, the parser is applied to input to produce an AST: ```go ast, err := parser.ParseString("", "size = 10") // ast == &INI{ // Properties: []*Property{ // {Key: "size", Value: &Value{Int: &10}}, // }, // } ``` ## Grammar syntax Participle grammars are defined as tagged Go structures. Participle will first look for tags in the form `parser:"..."`. It will then fall back to using the entire tag body. The grammar format is: - `@` Capture expression into the field. - `@@` Recursively capture using the fields own type. - `` Match named lexer token. - `( ... )` Group. - `"..."` or `'...'` Match the literal (note that the lexer must emit tokens matching this literal exactly). - `"...":` Match the literal, specifying the exact lexer token type to match. - ` ...` Match expressions. - ` | | ...` Match one of the alternatives. Each alternative is tried in order, with backtracking. - `~` Match any token that is _not_ the start of the expression (eg: `@~";"` matches anything but the `;` character into the field). - `(?= ... )` Positive lookahead group - requires the contents to match further input, without consuming it. - `(?! ... )` Negative lookahead group - requires the contents not to match further input, without consuming it. The following modifiers can be used after any expression: - `*` Expression can match zero or more times. - `+` Expression must match one or more times. - `?` Expression can match zero or once. - `!` Require a non-empty match (this is useful with a sequence of optional matches eg. `("a"? "b"? "c"?)!`). Notes: - Each struct is a single production, with each field applied in sequence. - `@` is the mechanism for capturing matches into the field. - if a struct field is not keyed with "parser", the entire struct tag will be used as the grammar fragment. This allows the grammar syntax to remain clear and simple to maintain. ## Capturing Prefixing any expression in the grammar with `@` will capture matching values for that expression into the corresponding field. For example: ```go // The grammar definition. type Grammar struct { Hello string `@Ident` } // The source text to parse. source := "world" // After parsing, the resulting AST. result == &Grammar{ Hello: "world", } ``` For slice and string fields, each instance of `@` will accumulate into the field (including repeated patterns). Accumulation into other types is not supported. For integer and floating point types, a successful capture will be parsed with `strconv.ParseInt()` and `strconv.ParseFloat()` respectively. A successful capture match into a `bool` field will set the field to true. Tokens can also be captured directly into fields of type `lexer.Token` and `[]lexer.Token`. Custom control of how values are captured into fields can be achieved by a field type implementing the `Capture` interface (`Capture(values []string) error`). Additionally, any field implementing the `encoding.TextUnmarshaler` interface will be capturable too. 
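For example, here is a minimal sketch of capturing into an `encoding.TextUnmarshaler` field (the `Version` type and the grammar around it are hypothetical, and `fmt` is assumed to be imported):

```go
// Version is a hypothetical field type that parses its own text.
type Version struct {
	Major, Minor int
}

// UnmarshalText implements encoding.TextUnmarshaler, so Participle can
// capture matched tokens into a Version field.
func (v *Version) UnmarshalText(text []byte) error {
	_, err := fmt.Sscanf(string(text), "%d.%d", &v.Major, &v.Minor)
	return err
}

type Requirement struct {
	Name    string  `@Ident "="`
	Version Version `@Float` // with the default lexer, "1.2" lexes as a Float token
}
```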
One caveat is that `UnmarshalText()` will be called once for each captured token, so eg. `@(Ident Ident Ident)` will be called three times. ### Capturing boolean value By default, a boolean field is used to indicate that a match occurred, which turns out to be much more useful and common in Participle than parsing true or false literals. For example, parsing a variable declaration with a trailing optional syntax: ```go type Var struct { Name string `"var" @Ident` Type string `":" @Ident` Optional bool `@"?"?` } ``` In practice this gives more useful ASTs. If bool were to be parsed literally then you'd need to have some alternate type for Optional such as string or a custom type. To capture literal boolean values such as `true` or `false`, implement the Capture interface like so: ```go type Boolean bool func (b *Boolean) Capture(values []string) error { *b = values[0] == "true" return nil } type Value struct { Float *float64 ` @Float` Int *int `| @Int` String *string `| @String` Bool *Boolean `| @("true" | "false")` } ``` ## "Union" types A very common pattern in parsers is "union" types, an example of which is shown above in the `Value` type. A common way of expressing this in Go is via a sealed interface, with each member of the union implementing this interface. eg. this is how the `Value` type could be expressed in this way: ```go type Value interface { value() } type Float struct { Value float64 `@Float` } func (f Float) value() {} type Int struct { Value int `@Int` } func (f Int) value() {} type String struct { Value string `@String` } func (f String) value() {} type Bool struct { Value Boolean `@("true" | "false")` } func (f Bool) value() {} ``` Thanks to the efforts of [Jacob Ryan McCollum](https://github.com/mccolljr), Participle now supports this pattern. Simply construct your parser with the `Union[T](member...T)` option, eg. ```go parser := participle.MustBuild[AST](participle.Union[Value](Float{}, Int{}, String{}, Bool{})) ``` Custom parsers may also be defined for union types with the [ParseTypeWith](https://pkg.go.dev/github.com/alecthomas/participle/v2#ParseTypeWith) option. ## Custom parsing There are three ways of defining custom parsers for nodes in the grammar: 1. Implement the [Capture](https://pkg.go.dev/github.com/alecthomas/participle/v2#Capture) interface. 2. Implement the [Parseable](https://pkg.go.dev/github.com/alecthomas/participle/v2#Parseable) interface. 3. Use the [ParseTypeWith](https://pkg.go.dev/github.com/alecthomas/participle/v2#ParseTypeWith) option to specify a custom parser for union interface types. ## Lexing Participle relies on distinct lexing and parsing phases. The lexer takes raw bytes and produces tokens which the parser consumes. The parser transforms these tokens into Go values. The default lexer, if one is not explicitly configured, is based on the Go `text/scanner` package and thus produces tokens for C/Go-like source code. This is surprisingly useful, but if you do require more control over lexing the included stateful [`participle/lexer`](#markdown-stateful-lexer) lexer should cover most other cases. If that in turn is not flexible enough, you can implement your own lexer. Configure your parser with a lexer using the `participle.Lexer()` option. 
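As a minimal sketch, here is how a hand-rolled lexer might be wired into the INI grammar from the overview above (the rule set is illustrative, not the default lexer's):

```go
var iniLexer = lexer.MustSimple([]lexer.SimpleRule{
	{"Ident", `[a-zA-Z_]\w*`},
	{"String", `"[^"]*"`},
	{"Float", `\d+\.\d+`},
	{"Int", `\d+`},
	{"Punct", `[][=]`},
	// Lowercase rule names are elided from the token stream.
	{"whitespace", `[ \t\r\n]+`},
})

var iniParser = participle.MustBuild[INI](
	participle.Lexer(iniLexer),
	participle.Unquote("String"),
)
```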
To use your own Lexer you will need to implement two interfaces: [Definition](https://pkg.go.dev/github.com/alecthomas/participle/v2/lexer#Definition) (and optionally [StringsDefinition](https://pkg.go.dev/github.com/alecthomas/participle/v2/lexer#StringDefinition) and [BytesDefinition](https://pkg.go.dev/github.com/alecthomas/participle/v2/lexer#BytesDefinition)) and [Lexer](https://pkg.go.dev/github.com/alecthomas/participle/v2/lexer#Lexer). ### Stateful lexer In addition to the default lexer, Participle includes an optional stateful/modal lexer which provides powerful yet convenient construction of most lexers. (Notably, indentation-based lexers cannot be expressed using the `stateful` lexer -- for discussion of how these lexers can be implemented, see [#20](https://github.com/alecthomas/participle/issues/20)). It is sometimes the case that a simple lexer cannot fully express the tokens required by a parser. The canonical example of this is interpolated strings within a larger language. eg. ```go let a = "hello ${name + ", ${last + "!"}"}" ``` This is impossible to tokenise with a normal lexer due to the arbitrarily deep nesting of expressions. To support this case Participle's lexer is now stateful by default. The lexer is a state machine defined by a map of rules keyed by the state name. Each rule within the state includes the name of the produced token, the regex to match, and an optional operation to apply when the rule matches. As a convenience, any `Rule` starting with a lowercase letter will be elided from output, though it is recommended to use `participle.Elide()` instead, as it better integrates with the parser. Lexing starts in the `Root` group. Each rule is matched in order, with the first successful match producing a lexeme. If the matching rule has an associated Action it will be executed. A state change can be introduced with the Action `Push(state)`. `Pop()` will return to the previous state. To reuse rules from another state, use `Include(state)`. A special named rule `Return()` can also be used as the final rule in a state to always return to the previous state. As a special case, regexes containing backrefs in the form `\N` (where `N` is a digit) will match the corresponding capture group from the immediate parent group. This can be used to parse, among other things, heredocs. See the [tests](https://github.com/alecthomas/participle/blob/master/lexer/stateful_test.go#L59) for an example of this, among others. ### Example stateful lexer Here's a cut-down example of the string interpolation described above. Refer to the [stateful example](https://github.com/alecthomas/participle/tree/master/_examples/stateful) for the corresponding parser. ```go var def = lexer.MustStateful(lexer.Rules{ "Root": { {`String`, `"`, lexer.Push("String")}, }, "String": { {"Escaped", `\\.`, nil}, {"StringEnd", `"`, lexer.Pop()}, {"Expr", `\${`, lexer.Push("Expr")}, {"Char", `[^$"\\]+`, nil}, }, "Expr": { lexer.Include("Root"), {`whitespace`, `\s+`, nil}, {`Oper`, `[-+/*%]`, nil}, {"Ident", `\w+`, nil}, {"ExprEnd", `}`, lexer.Pop()}, }, }) ``` ### Example simple/non-stateful lexer Other than the default and stateful lexers, it's easy to define your own _stateless_ lexer using the `lexer.MustSimple()` and `lexer.NewSimple()` functions. These functions accept a slice of `lexer.SimpleRule{}` objects consisting of a key and a regex-style pattern. > **Note:** The stateful lexer replaces the old regex lexer. 
For example, the lexer for a form of BASIC: ```go var basicLexer = lexer.MustSimple([]lexer.SimpleRule{ {"Comment", `(?i)rem[^\n]*`}, {"String", `"(\\"|[^"])*"`}, {"Number", `[-+]?(\d*\.)?\d+`}, {"Ident", `[a-zA-Z_]\w*`}, {"Punct", `[-[!@#$%^&*()+_={}\|:;"'<,>.?/]|]`}, {"EOL", `[\n\r]+`}, {"whitespace", `[ \t]+`}, }) ``` ### Experimental - code generation Participle v2 now has experimental support for generating code to perform lexing. This will generally provide around a 10x improvement in lexing performance while producing O(1) garbage. To use: 1. Serialize the `stateful` lexer definition to a JSON file (pass it to `json.Marshal`). 2. Run the `participle` command (see `scripts/participle`) to generate Go code from the lexer JSON definition. For example: ``` participle gen lexer [--name SomeCustomName] < mylexer.json | gofmt > mypackage/mylexer.go ``` (see `genLexer` in `conformance_test.go` for a more detailed example) 3. When constructing your parser, use the generated lexer for your lexer definition, such as: ``` var ParserDef = participle.MustBuild[someGrammar](participle.Lexer(mylexer.SomeCustomNameLexer)) ``` Consider contributing to the tests in `conformance_test.go` if they do not appear to cover the types of expressions you are using with the generated lexer. **Known limitations of the code-generated lexer:** * The lexer is always greedy. For example, the regex `"[A-Z][A-Z][A-Z]?T"` will not match `"EST"` in the generated lexer, because the `?` operator matches greedily and does not "give back" to try other possibilities. You can work around this by rewriting the pattern with alternation, e.g., `"[A-Z][A-Z](?:[A-Z]T|T)"` will produce correct results in both lexers (see [#276](https://github.com/alecthomas/participle/issues/276) for more detail). This limitation is what allows the generated lexer to be very fast and memory efficient. * Backreferences in regular expressions are not currently supported. ## Options The Parser's behaviour can be configured via [Options](https://pkg.go.dev/github.com/alecthomas/participle/v2#Option). ## Examples There are several [examples included](https://github.com/alecthomas/participle/tree/master/_examples), some of which are linked directly here. These examples should be run from the `_examples` subdirectory within a cloned copy of this repository. Example | Description --------|--------------- [BASIC](https://github.com/alecthomas/participle/tree/master/_examples/basic) | A lexer, parser and interpreter for a [rudimentary dialect](https://caml.inria.fr/pub/docs/oreilly-book/html/book-ora058.html) of BASIC. [EBNF](https://github.com/alecthomas/participle/tree/master/_examples/ebnf) | Parser for the form of EBNF used by Go. [Expr](https://github.com/alecthomas/participle/tree/master/_examples/expr) | A basic mathematical expression parser and evaluator. [GraphQL](https://github.com/alecthomas/participle/tree/master/_examples/graphql) | Lexer+parser for GraphQL schemas. [HCL](https://github.com/alecthomas/participle/tree/master/_examples/hcl) | A parser for the [HashiCorp Configuration Language](https://github.com/hashicorp/hcl). [INI](https://github.com/alecthomas/participle/tree/master/_examples/ini) | An INI file parser. [Protobuf](https://github.com/alecthomas/participle/tree/master/_examples/protobuf) | A full [Protobuf](https://developers.google.com/protocol-buffers/) version 2 and 3 parser. [SQL](https://github.com/alecthomas/participle/tree/master/_examples/sql) | A *very* rudimentary SQL SELECT parser. 
[Stateful](https://github.com/alecthomas/participle/tree/master/_examples/stateful) | A basic example of a stateful lexer and corresponding parser. [Thrift](https://github.com/alecthomas/participle/tree/master/_examples/thrift) | A full [Thrift](https://thrift.apache.org/docs/idl) parser. [TOML](https://github.com/alecthomas/participle/tree/master/_examples/toml) | A [TOML](https://github.com/toml-lang/toml) parser. Included below is a full GraphQL lexer and parser: ```go package main import ( "fmt" "os" "github.com/alecthomas/kong" "github.com/alecthomas/repr" "github.com/alecthomas/participle/v2" "github.com/alecthomas/participle/v2/lexer" ) type File struct { Entries []*Entry `@@*` } type Entry struct { Type *Type ` @@` Schema *Schema `| @@` Enum *Enum `| @@` Scalar string `| "scalar" @Ident` } type Enum struct { Name string `"enum" @Ident` Cases []string `"{" @Ident* "}"` } type Schema struct { Fields []*Field `"schema" "{" @@* "}"` } type Type struct { Name string `"type" @Ident` Implements string `( "implements" @Ident )?` Fields []*Field `"{" @@* "}"` } type Field struct { Name string `@Ident` Arguments []*Argument `( "(" ( @@ ( "," @@ )* )? ")" )?` Type *TypeRef `":" @@` Annotation string `( "@" @Ident )?` } type Argument struct { Name string `@Ident` Type *TypeRef `":" @@` Default *Value `( "=" @@ )?` } type TypeRef struct { Array *TypeRef `( "[" @@ "]"` Type string ` | @Ident )` NonNullable bool `( @"!" )?` } type Value struct { Symbol string `@Ident` } var ( graphQLLexer = lexer.MustSimple([]lexer.SimpleRule{ {"Comment", `(?:#|//)[^\n]*\n?`}, {"Ident", `[a-zA-Z]\w*`}, {"Number", `(?:\d*\.)?\d+`}, {"Punct", `[-[!@#$%^&*()+_={}\|:;"'<,>.?/]|]`}, {"Whitespace", `[ \t\n\r]+`}, }) parser = participle.MustBuild[File]( participle.Lexer(graphQLLexer), participle.Elide("Comment", "Whitespace"), participle.UseLookahead(2), ) ) var cli struct { EBNF bool `help:"Dump EBNF."` Files []string `arg:"" optional:"" type:"existingfile" help:"GraphQL schema files to parse."` } func main() { ctx := kong.Parse(&cli) if cli.EBNF { fmt.Println(parser.String()) ctx.Exit(0) } for _, file := range cli.Files { r, err := os.Open(file) ctx.FatalIfErrorf(err) ast, err := parser.Parse(file, r) r.Close() repr.Println(ast) ctx.FatalIfErrorf(err) } } ``` ## Performance One of the included examples is a complete Thrift parser (shell-style comments are not supported). This gives a convenient baseline for comparing to the PEG-based [pigeon](https://github.com/PuerkitoBio/pigeon), which is the parser used by [go-thrift](https://github.com/samuel/go-thrift). Additionally, the pigeon parser is generated ahead of time, while the participle parser is built at run time. You can run the benchmarks yourself, but here's the output on my machine: BenchmarkParticipleThrift-12 5941 201242 ns/op 178088 B/op 2390 allocs/op BenchmarkGoThriftParser-12 3196 379226 ns/op 157560 B/op 2644 allocs/op On a real-life codebase of 47K lines of Thrift, Participle takes 200ms and go-thrift takes 630ms, which aligns quite closely with the benchmarks. ## Concurrency A compiled `Parser` instance can be used concurrently. A `LexerDefinition` can be used concurrently. A `Lexer` instance cannot be used concurrently. ## Error reporting There are a few areas where Participle can provide useful feedback to users of your parser. 1. Errors returned by [Parser.Parse*()](https://pkg.go.dev/github.com/alecthomas/participle/v2#Parser.Parse) will be: 1. Of type [Error](https://pkg.go.dev/github.com/alecthomas/participle/v2#Error). 
This will contain positional information where available. 2. May be either a [ParseError](https://pkg.go.dev/github.com/alecthomas/participle/v2#ParseError) or a [lexer.Error](https://pkg.go.dev/github.com/alecthomas/participle/v2/lexer#Error). 2. Participle will make a best effort to return as much of the AST up to the error location as possible. 3. Any node in the AST containing a field `Pos lexer.Position` [^1] will be automatically populated from the nearest matching token. 4. Any node in the AST containing a field `EndPos lexer.Position` [^1] will be automatically populated from the token at the end of the node. 5. Any node in the AST containing a field `Tokens []lexer.Token` will be automatically populated with _all_ tokens captured by the node, _including_ elided tokens. [^1]: Either the concrete type or a type convertible to it, allowing user-defined types to be used. These related pieces of information can be combined to provide fairly comprehensive error reporting. ## Comments Comments can be difficult to capture, as in most languages they may appear almost anywhere. There are three ways of capturing comments, with decreasing fidelity. The first is to elide comment tokens in the parser, then add `Tokens []lexer.Token` as a field to each AST node. Comments will be included. This has the downside that there's no straightforward way to know where the comments are relative to non-comment tokens in that node. The second way is to _not_ elide comment tokens, and explicitly capture them at every location in the AST where they might occur. This has the downside that unless you place these captures in every possible valid location, users might insert valid comments that then fail to parse. The third way is to elide comment tokens and capture them where they're semantically meaningful, such as for documentation comments. Participle supports explicitly matching elided tokens for this purpose. ## Limitations Internally, Participle is a recursive descent parser with backtracking (see `UseLookahead(K)`). Among other things, this means that Participle grammars do not support left recursion. Left recursion must be eliminated by restructuring your grammar. ## EBNF The old `EBNF` lexer was removed in a major refactoring at [362b26](https://github.com/alecthomas/participle/commit/362b26640fa3dc406aa60960f7d9a5b9a909414e) -- if you have an EBNF grammar you need to implement, you can either translate it into regex-style `lexer.Rule{}` syntax or implement your own EBNF lexer -- [the old EBNF lexer](https://github.com/alecthomas/participle/blob/2403858c8b2068b4b0cf96a6b36dd7069674039b/lexer/ebnf/ebnf.go) might serve as a starting point. Participle supports outputting an EBNF grammar from a Participle parser. Once the parser is constructed, simply call `String()`. Participle also [includes a parser](https://pkg.go.dev/github.com/alecthomas/participle/v2/ebnf) for this form of EBNF (naturally). eg. The [GraphQL example](https://github.com/alecthomas/participle/blob/master/_examples/graphql/main.go#L15-L62) produces the following EBNF: ```ebnf File = Entry* . Entry = Type | Schema | Enum | "scalar" ident . Type = "type" ident ("implements" ident)? "{" Field* "}" . Field = ident ("(" (Argument ("," Argument)*)? ")")? ":" TypeRef ("@" ident)? . Argument = ident ":" TypeRef ("=" Value)? . TypeRef = "[" TypeRef "]" | ident "!"? . Value = ident . Schema = "schema" "{" Field* "}" . Enum = "enum" ident "{" ident* "}" . 
``` ## Syntax/Railroad Diagrams Participle includes a [command-line utility](https://github.com/alecthomas/participle/tree/master/cmd/railroad) to take an EBNF representation of a Participle grammar (as returned by `Parser.String()`) and produce a Railroad Diagram using [tabatkins/railroad-diagrams](https://github.com/tabatkins/railroad-diagrams). Here's what the GraphQL grammar looks like: ![EBNF Railroad Diagram](railroad.png) golang-github-alecthomas-participle-v2-2.1.4/TUTORIAL.md000066400000000000000000000171131505300366400226340ustar00rootroot00000000000000# Participle parser tutorial - [Introduction](#introduction) - [The complete grammar](#the-complete-grammar) - [Root of the .ini AST (structure, fields)](#root-of-the-ini-ast-structure-fields) - [.ini properties (named tokens, capturing, literals)](#ini-properties-named-tokens-capturing-literals) - [.ini property values (alternates, recursive structs, sequences)](#ini-property-values-alternates-recursive-structs-sequences) - [Complete, but limited, .ini grammar (top-level properties only)](#complete-but-limited-ini-grammar-top-level-properties-only) - [Extending our grammar to support sections](#extending-our-grammar-to-support-sections) - [(Optional) Source positional information](#optional-source-positional-information) - [Parsing using our grammar](#parsing-using-our-grammar) ## Introduction Writing a parser in Participle typically involves starting from the "root" of the AST, annotating fields with the grammar, then recursively expanding until it is complete. The AST is expressed via Go data types and the grammar is expressed through struct field tags, as a form of EBNF. The parser we're going to create for this tutorial parses .ini files like this: ```ini age = 21 name = "Bob Smith" [address] city = "Beverly Hills" postal_code = 90210 ``` ## The complete grammar I think it's useful to see the complete grammar first, to see what we're working towards. Read on below for details. ```go type INI struct { Properties []*Property `@@*` Sections []*Section `@@*` } type Section struct { Identifier string `"[" @Ident "]"` Properties []*Property `@@*` } type Property struct { Key string `@Ident "="` Value Value `@@` } type Value interface{ value() } type String struct { String string `@String` } func (String) value() {} type Number struct { Number float64 `@Float | @Int` } func (Number) value() {} ``` ## Root of the .ini AST (structure, fields) The first step is to create a root struct for our grammar. In the case of our .ini parser, this struct will contain a sequence of properties: ```go type INI struct { Properties []*Property } type Property struct { } ``` ## .ini properties (named tokens, capturing, literals) Each property in an .ini file has an identifier key: ```go type Property struct { Key string } ``` The default lexer tokenises Go source code, and includes an `Ident` token type that matches identifiers. To match this token we simply use the token type name: ```go type Property struct { Key string `Ident` } ``` This will *match* identifiers, but not *capture* them into the `Key` field. To capture input tokens into AST fields, prefix any grammar node with `@`: ```go type Property struct { Key string `@Ident` } ``` In .ini files, each key is separated from its value with a literal `=`. To match a literal, enclose the literal in double quotes: ```go type Property struct { Key string `@Ident "="` } ``` > Note: literals in the grammar must match tokens from the lexer *exactly*. 
In this example, if the lexer does not output `=` as a distinct token, the grammar will not match. ## .ini property values (alternates, recursive structs, sequences) For the purposes of our example we are only going to support quoted string and numeric property values. As each value can be *either* a string or a float, we'll need something akin to a sum type. Participle supports this via the `Union[T any](members ...T) Option` parser option. This tells the parser that when a field of interface type `T` is encountered, it should try to match each of the `members` in turn, and return the first successful match. ```go type Value interface{ value() } type String struct { String string `@String` } func (String) value() {} type Number struct { Number float64 `@Float` } func (Number) value() {} ``` Since we also want to parse integers, and the default lexer differentiates between floats and integers, we need to explicitly match either. To express matching a set of alternatives such as this, we use the `|` operator: ```go type Number struct { Number float64 `@Float | @Int` } ``` > Note: the grammar can cross fields. Next, we'll match values and capture them into the `Property`. To recursively capture structs use `@@` (capture self): ```go type Property struct { Key string `@Ident "="` Value Value `@@` } ``` Now that we can parse a `Property` we need to go back to the root of the grammar. We want to parse 0 or more properties. To do this, we use `*`. Participle will accumulate each match into the slice until matching fails, then move to the next node in the grammar. ```go type INI struct { Properties []*Property `@@*` } ``` > Note: tokens can also be accumulated into strings, appending each match. ## Complete, but limited, .ini grammar (top-level properties only) We now have a functional, but limited, .ini parser! ```go type INI struct { Properties []*Property `@@*` } type Property struct { Key string `@Ident "="` Value Value `@@` } type Value interface{ value() } type String struct { String string `@String` } func (String) value() {} type Number struct { Number float64 `@Float | @Int` } func (Number) value() {} ``` ## Extending our grammar to support sections Adding support for sections is simply a matter of utilising the constructs we've just learnt. A section consists of a header identifier, and a sequence of properties: ```go type Section struct { Identifier string `"[" @Ident "]"` Properties []*Property `@@*` } ``` Simple! Now we just add a sequence of `Section`s to our root node: ```go type INI struct { Properties []*Property `@@*` Sections []*Section `@@*` } ``` And we're done! ## (Optional) Source positional information If a grammar node includes a field with the name `Pos` and type `lexer.Position`, it will be automatically populated by positional information. eg. ```go type String struct { Pos lexer.Position String string `@String` } type Number struct { Pos lexer.Position Number float64 `@Float | @Int` } ``` This is useful for error reporting. 
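As a sketch of how the captured position might be used, a later semantic check could report errors against the original source (the validation rule itself is hypothetical, and `fmt` is assumed to be imported):

```go
// checkNumber is a hypothetical semantic check over the parsed AST.
func checkNumber(n *Number) error {
	if n.Number < 0 {
		// lexer.Position formats as "file:line:column".
		return fmt.Errorf("%s: negative values are not allowed", n.Pos)
	}
	return nil
}
```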
## Parsing using our grammar To parse with this grammar we first construct the parser (we'll use the default lexer for now): ```go parser, err := participle.Build[INI]( participle.Unquote("String"), participle.Union[Value](String{}, Number{}), ) ``` Then parse a new INI file with `parser.Parse{,String,Bytes}()`: ```go ini, err := parser.ParseString("", ` age = 21 name = "Bob Smith" [address] city = "Beverly Hills" postal_code = 90210 `) ``` You can find the full example [here](_examples/ini/main.go), alongside other examples including an SQL `SELECT` parser and a full [Thrift](https://thrift.apache.org/) parser. golang-github-alecthomas-participle-v2-2.1.4/_examples/000077500000000000000000000000001505300366400230215ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/_examples/basic/000077500000000000000000000000001505300366400241025ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/_examples/basic/ast.go000066400000000000000000000045041505300366400252230ustar00rootroot00000000000000// nolint: golint package main import ( "io" "strings" "github.com/alecthomas/participle/v2/lexer" ) // Parse a BASIC program. func Parse(r io.Reader) (*Program, error) { program, err := basicParser.Parse("", r) if err != nil { return nil, err } program.init() return program, nil } type Program struct { Pos lexer.Position Commands []*Command `@@*` Table map[int]*Command } type Command struct { Pos lexer.Position Index int Line int `@Number` Remark *Remark `( @@` Input *Input ` | @@` Let *Let ` | @@` Goto *Goto ` | @@` If *If ` | @@` Print *Print ` | @@` Call *Call ` | @@ ) EOL` } type Remark struct { Pos lexer.Position Comment string `@Comment` } type Call struct { Pos lexer.Position Name string `@Ident` Args []*Expression `"(" ( @@ ( "," @@ )* )? ")"` } type Print struct { Pos lexer.Position Expression *Expression `"PRINT" @@` } type Input struct { Pos lexer.Position Variable string `"INPUT" @Ident` } type Let struct { Pos lexer.Position Variable string `"LET" @Ident` Value *Expression `"=" @@` } type Goto struct { Pos lexer.Position Line int `"GOTO" @Number` } type If struct { Pos lexer.Position Condition *Expression `"IF" @@` Line int `"THEN" @Number` } type Operator string func (o *Operator) Capture(s []string) error { *o = Operator(strings.Join(s, "")) return nil } type Value struct { Pos lexer.Position Number *float64 ` @Number` Variable *string `| @Ident` String *string `| @String` Call *Call `| @@` Subexpression *Expression `| "(" @@ ")"` } type Factor struct { Pos lexer.Position Base *Value `@@` Exponent *Value `( "^" @@ )?` } type OpFactor struct { Pos lexer.Position Operator Operator `@("*" | "/")` Factor *Factor `@@` } type Term struct { Pos lexer.Position Left *Factor `@@` Right []*OpFactor `@@*` } type OpTerm struct { Pos lexer.Position Operator Operator `@("+" | "-")` Term *Term `@@` } type Cmp struct { Pos lexer.Position Left *Term `@@` Right []*OpTerm `@@*` } type OpCmp struct { Pos lexer.Position Operator Operator `@("=" | "<" "=" | ">" "=" | "<" | ">" | "!" 
"=")` Cmp *Cmp `@@` } type Expression struct { Pos lexer.Position Left *Cmp `@@` Right []*OpCmp `@@*` } golang-github-alecthomas-participle-v2-2.1.4/_examples/basic/eval.go000066400000000000000000000153441505300366400253670ustar00rootroot00000000000000// nolint: golint, dupl package main import ( "fmt" "io" "math" "github.com/alecthomas/repr" "github.com/alecthomas/participle/v2" ) type Evaluatable interface { Evaluate(ctx *Context) (interface{}, error) } type Function func(args ...interface{}) (interface{}, error) // Context for evaluation. type Context struct { // User-provided functions. Functions map[string]Function // Vars defined during evaluation. Vars map[string]interface{} // Reader from which INPUT is read. Input io.Reader // Writer where PRINTing will write. Output io.Writer } func (p *Program) init() { p.Table = map[int]*Command{} for index, cmd := range p.Commands { cmd.Index = index p.Table[cmd.Line] = cmd } } func (v *Value) Evaluate(ctx *Context) (interface{}, error) { switch { case v.Number != nil: return *v.Number, nil case v.String != nil: return *v.String, nil case v.Variable != nil: value, ok := ctx.Vars[*v.Variable] if !ok { return nil, fmt.Errorf("unknown variable %q", *v.Variable) } return value, nil case v.Subexpression != nil: return v.Subexpression.Evaluate(ctx) case v.Call != nil: return v.Call.Evaluate(ctx) } panic("unsupported value type" + repr.String(v)) } func (f *Factor) Evaluate(ctx *Context) (interface{}, error) { base, err := f.Base.Evaluate(ctx) if err != nil { return nil, err } if f.Exponent == nil { return base, nil } baseNum, exponentNum, err := evaluateFloats(ctx, base, f.Exponent) if err != nil { return nil, participle.Errorf(f.Pos, "invalid factor: %s", err) } return math.Pow(baseNum, exponentNum), nil } func (o *OpFactor) Evaluate(ctx *Context, lhs interface{}) (interface{}, error) { lhsNumber, rhsNumber, err := evaluateFloats(ctx, lhs, o.Factor) if err != nil { return nil, participle.Errorf(o.Pos, "invalid arguments for %s: %s", o.Operator, err) } switch o.Operator { case "*": return lhsNumber * rhsNumber, nil case "/": return lhsNumber / rhsNumber, nil } panic("unreachable") } func (t *Term) Evaluate(ctx *Context) (interface{}, error) { lhs, err := t.Left.Evaluate(ctx) if err != nil { return nil, err } for _, right := range t.Right { rhs, err := right.Evaluate(ctx, lhs) if err != nil { return nil, err } lhs = rhs } return lhs, nil } func (o *OpTerm) Evaluate(ctx *Context, lhs interface{}) (interface{}, error) { lhsNumber, rhsNumber, err := evaluateFloats(ctx, lhs, o.Term) if err != nil { return nil, participle.Errorf(o.Pos, "invalid arguments for %s: %s", o.Operator, err) } switch o.Operator { case "+": return lhsNumber + rhsNumber, nil case "-": return lhsNumber - rhsNumber, nil } panic("unreachable") } func (c *Cmp) Evaluate(ctx *Context) (interface{}, error) { lhs, err := c.Left.Evaluate(ctx) if err != nil { return nil, err } for _, right := range c.Right { rhs, err := right.Evaluate(ctx, lhs) if err != nil { return nil, err } lhs = rhs } return lhs, nil } func (o *OpCmp) Evaluate(ctx *Context, lhs interface{}) (interface{}, error) { rhs, err := o.Cmp.Evaluate(ctx) if err != nil { return nil, err } switch lhs := lhs.(type) { case float64: rhs, ok := rhs.(float64) if !ok { return nil, participle.Errorf(o.Pos, "rhs of %s must be a number", o.Operator) } switch o.Operator { case "=": return lhs == rhs, nil case "!=": return lhs != rhs, nil case "<": return lhs < rhs, nil case ">": return lhs > rhs, nil case "<=": return lhs <= rhs, nil case 
">=": return lhs >= rhs, nil } case string: rhs, ok := rhs.(string) if !ok { return nil, participle.Errorf(o.Pos, "rhs of %s must be a string", o.Operator) } switch o.Operator { case "=": return lhs == rhs, nil case "!=": return lhs != rhs, nil case "<": return lhs < rhs, nil case ">": return lhs > rhs, nil case "<=": return lhs <= rhs, nil case ">=": return lhs >= rhs, nil } default: return nil, participle.Errorf(o.Pos, "lhs of %s must be a number or string", o.Operator) } panic("unreachable") } func (e *Expression) Evaluate(ctx *Context) (interface{}, error) { lhs, err := e.Left.Evaluate(ctx) if err != nil { return nil, err } for _, right := range e.Right { rhs, err := right.Evaluate(ctx, lhs) if err != nil { return nil, err } lhs = rhs } return lhs, nil } func (c *Call) Evaluate(ctx *Context) (interface{}, error) { function, ok := ctx.Functions[c.Name] if !ok { return nil, participle.Errorf(c.Pos, "unknown function %q", c.Name) } args := []interface{}{} for _, arg := range c.Args { value, err := arg.Evaluate(ctx) if err != nil { return nil, err } args = append(args, value) } value, err := function(args...) if err != nil { return nil, participle.Errorf(c.Pos, "call to %s() failed", c.Name) } return value, nil } func (p *Program) Evaluate(r io.Reader, w io.Writer, functions map[string]Function) error { if len(p.Commands) == 0 { return nil } ctx := &Context{ Vars: map[string]interface{}{}, Functions: functions, Input: r, Output: w, } for index := 0; index < len(p.Commands); { cmd := p.Commands[index] switch { case cmd.Goto != nil: cmd := cmd.Goto next, ok := p.Table[cmd.Line] if !ok { return participle.Errorf(cmd.Pos, "invalid line number %d", cmd.Line) } index = next.Index continue case cmd.Remark != nil: case cmd.Let != nil: cmd := cmd.Let value, err := cmd.Value.Evaluate(ctx) if err != nil { return err } ctx.Vars[cmd.Variable] = value case cmd.Print != nil: cmd := cmd.Print value, err := cmd.Expression.Evaluate(ctx) if err != nil { return err } fmt.Fprintln(ctx.Output, value) case cmd.Input != nil: cmd := cmd.Input var value float64 _, err := fmt.Fscanln(ctx.Input, &value) if err != nil { return participle.Errorf(cmd.Pos, "invalid input: %s", err) } ctx.Vars[cmd.Variable] = value case cmd.If != nil: cmd := cmd.If condition, err := cmd.Condition.Evaluate(ctx) if err != nil { return err } if test, ok := condition.(bool); ok && test { next, ok := p.Table[cmd.Line] if !ok { return participle.Errorf(cmd.Pos, "invalid line number %d", cmd.Line) } index = next.Index continue } case cmd.Call != nil: _, err := cmd.Call.Evaluate(ctx) if err != nil { return err } default: panic("unsupported command " + repr.String(cmd)) } index++ } return nil } func evaluateFloats(ctx *Context, lhs interface{}, rhsExpr Evaluatable) (float64, float64, error) { rhs, err := rhsExpr.Evaluate(ctx) if err != nil { return 0, 0, err } lhsNumber, ok := lhs.(float64) if !ok { return 0, 0, fmt.Errorf("lhs must be a number") } rhsNumber, ok := rhs.(float64) if !ok { return 0, 0, fmt.Errorf("rhs must be a number") } return lhsNumber, rhsNumber, nil } golang-github-alecthomas-participle-v2-2.1.4/_examples/basic/example.bas000066400000000000000000000003351505300366400262250ustar00rootroot00000000000000 5 REM inputting the argument 10 PRINT "Factorial of:" 20 INPUT A 30 LET B = 1 35 REM beginning of the loop 40 IF A <= 1 THEN 80 50 LET B = B * A 60 LET A = A - 1 70 GOTO 40 75 REM prints the result 80 PRINT B 
golang-github-alecthomas-participle-v2-2.1.4/_examples/basic/hidden.bas000066400000000000000000000003151505300366400260230ustar00rootroot0000000000000010 PRINT "Give the hidden number: " 20 INPUT N 30 PRINT "Give a number: " 40 INPUT R 50 IF R = N THEN 110 60 IF R < N THEN 90 70 PRINT "C-" 80 GOTO 30 90 PRINT "C+" 100 GOTO 30 110 PRINT "CONGRATULATIONS" golang-github-alecthomas-participle-v2-2.1.4/_examples/basic/main.go000066400000000000000000000021341505300366400253550ustar00rootroot00000000000000// nolint: golint, dupl package main import ( "os" "github.com/alecthomas/kong" "github.com/alecthomas/participle/v2" "github.com/alecthomas/participle/v2/lexer" ) var ( basicLexer = lexer.MustSimple([]lexer.SimpleRule{ {"Comment", `(?i)rem[^\n]*`}, {"String", `"(\\"|[^"])*"`}, {"Number", `[-+]?(\d*\.)?\d+`}, {"Ident", `[a-zA-Z_]\w*`}, {"Punct", `[-[!@#$%^&*()+_={}\|:;"'<,>.?/]|]`}, {"EOL", `[\n\r]+`}, {"whitespace", `[ \t]+`}, }) basicParser = participle.MustBuild[Program]( participle.Lexer(basicLexer), participle.CaseInsensitive("Ident"), participle.Unquote("String"), participle.UseLookahead(2), ) cli struct { File string `arg:"" type:"existingfile" help:"File to parse."` } ) func main() { ctx := kong.Parse(&cli) r, err := os.Open(cli.File) ctx.FatalIfErrorf(err) defer r.Close() program, err := Parse(r) ctx.FatalIfErrorf(err) funcs := map[string]Function{ "ADD": func(args ...interface{}) (interface{}, error) { return args[0].(float64) + args[1].(float64), nil }, } err = program.Evaluate(os.Stdin, os.Stdout, funcs) ctx.FatalIfErrorf(err) } golang-github-alecthomas-participle-v2-2.1.4/_examples/basic/main_test.go000066400000000000000000000006441505300366400264200ustar00rootroot00000000000000package main import ( "strings" "testing" require "github.com/alecthomas/assert/v2" ) func TestExe(t *testing.T) { src := `5 REM inputting the argument 10 PRINT "Factorial of:" 20 INPUT A 30 LET B = 1 35 REM beginning of the loop 40 IF A <= 1 THEN 80 50 LET B = B * A 60 LET A = A - 1 70 GOTO 40 75 REM prints the result 80 PRINT B ` _, err := Parse(strings.NewReader(src)) require.NoError(t, err) } golang-github-alecthomas-participle-v2-2.1.4/_examples/ebnf/000077500000000000000000000000001505300366400237335ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/_examples/ebnf/main.go000066400000000000000000000064721505300366400252170ustar00rootroot00000000000000package main import ( "bytes" "encoding/json" "fmt" "os" "strings" "github.com/alecthomas/kong" "github.com/alecthomas/participle/v2" ) var cli struct { JSON bool `help:"Display AST as JSON."` } type Group struct { Expression *Expression `"(" @@ ")"` } func (g *Group) String() string { return fmt.Sprintf("( %s )", g.Expression) } type Option struct { Expression *Expression `"[" @@ "]"` } func (o *Option) String() string { return fmt.Sprintf("[ %s ]", o.Expression) } type Repetition struct { Expression *Expression `"{" @@ "}"` } func (r *Repetition) String() string { return fmt.Sprintf("{ %s }", r.Expression) } type Literal struct { Start string `@String` // Lexer token "String" End string `( "…" @String )?` } func (l *Literal) String() string { if l.End != "" { return fmt.Sprintf("%q … %q", l.Start, l.End) } return fmt.Sprintf("%q", l.Start) } type Term struct { Name string `@Ident |` Literal *Literal `@@ |` Group *Group `@@ |` Option *Option `@@ |` Repetition *Repetition `@@` } func (t *Term) String() string { switch { case t.Name != "": return t.Name case t.Literal != nil: return t.Literal.String() case t.Group != nil: return 
t.Group.String() case t.Option != nil: return t.Option.String() case t.Repetition != nil: return t.Repetition.String() default: panic("wut") } } type Sequence struct { Terms []*Term `@@+` } func (s *Sequence) String() string { terms := []string{} for _, term := range s.Terms { terms = append(terms, term.String()) } return strings.Join(terms, " ") } type Expression struct { Alternatives []*Sequence `@@ ( "|" @@ )*` } func (e *Expression) String() string { sequences := []string{} for _, sequence := range e.Alternatives { sequences = append(sequences, sequence.String()) } return strings.Join(sequences, " | ") } type Expressions []*Expression func (e Expressions) String() string { expressions := []string{} for _, expression := range e { expressions = append(expressions, expression.String()) } return strings.Join(expressions, " ") } type Production struct { Name string `@Ident "="` Expressions Expressions `@@+ "."` } func (p *Production) String() string { expressions := []string{} for _, expression := range p.Expressions { expressions = append(expressions, expression.String()) } return fmt.Sprintf("%s = %s .", p.Name, strings.Join(expressions, " ")) } type EBNF struct { Productions []*Production `@@*` } func (e *EBNF) String() string { w := bytes.NewBuffer(nil) for _, production := range e.Productions { fmt.Fprintf(w, "%s\n", production) } return w.String() } var parser = participle.MustBuild[EBNF]() func main() { help := `An EBNF parser compatible with Go"s exp/ebnf. The grammar is in the form: Production = name "=" [ Expression ] "." . Expression = Alternative { "|" Alternative } . Alternative = Term { Term } . Term = name | token [ "…" token ] | Group | Option | Repetition . Group = "(" Expression ")" . Option = "[" Expression "]" . Repetition = "{" Expression "}" . ` ctx := kong.Parse(&cli, kong.Description(help)) ebnf, err := parser.Parse("", os.Stdin) ctx.FatalIfErrorf(err, "") if cli.JSON { bytes, _ := json.MarshalIndent(ebnf, "", " ") fmt.Printf("%s\n", bytes) } else { fmt.Print(ebnf) } } golang-github-alecthomas-participle-v2-2.1.4/_examples/ebnf/main_test.go000066400000000000000000000007511505300366400262500ustar00rootroot00000000000000package main import ( "testing" require "github.com/alecthomas/assert/v2" ) func TestExe(t *testing.T) { _, err := parser.ParseString("", ` Production = name "=" [ Expression ] "." . Expression = Alternative { "|" Alternative } . Alternative = Term { Term } . Term = name | token [ "…" token ] | Group | Option | Repetition . Group = "(" Expression ")" . Option = "[" Expression "]" . 
Repetition = "{" Expression "}" .`) require.NoError(t, err) } golang-github-alecthomas-participle-v2-2.1.4/_examples/expr/000077500000000000000000000000001505300366400237775ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/_examples/expr/main.go000066400000000000000000000072101505300366400252520ustar00rootroot00000000000000// nolint: govet package main import ( "encoding/json" "fmt" "math" "os" "strings" "github.com/alecthomas/kong" "github.com/alecthomas/participle/v2" ) var cli struct { AST bool `help:"Print AST for expression."` Set map[string]float64 `short:"s" help:"Set variables."` Expression []string `arg required help:"Expression to evaluate."` } type Operator int const ( OpMul Operator = iota OpDiv OpAdd OpSub ) var operatorMap = map[string]Operator{"+": OpAdd, "-": OpSub, "*": OpMul, "/": OpDiv} func (o *Operator) Capture(s []string) error { *o = operatorMap[s[0]] return nil } // E --> T {( "+" | "-" ) T} // T --> F {( "*" | "/" ) F} // F --> P ["^" F] // P --> v | "(" E ")" | "-" T type Value struct { Number *float64 ` @(Float|Int)` Variable *string `| @Ident` Subexpression *Expression `| "(" @@ ")"` } type Factor struct { Base *Value `@@` Exponent *Value `( "^" @@ )?` } type OpFactor struct { Operator Operator `@("*" | "/")` Factor *Factor `@@` } type Term struct { Left *Factor `@@` Right []*OpFactor `@@*` } type OpTerm struct { Operator Operator `@("+" | "-")` Term *Term `@@` } type Expression struct { Left *Term `@@` Right []*OpTerm `@@*` } // Display func (o Operator) String() string { switch o { case OpMul: return "*" case OpDiv: return "/" case OpSub: return "-" case OpAdd: return "+" } panic("unsupported operator") } func (v *Value) String() string { if v.Number != nil { return fmt.Sprintf("%g", *v.Number) } if v.Variable != nil { return *v.Variable } return "(" + v.Subexpression.String() + ")" } func (f *Factor) String() string { out := f.Base.String() if f.Exponent != nil { out += " ^ " + f.Exponent.String() } return out } func (o *OpFactor) String() string { return fmt.Sprintf("%s %s", o.Operator, o.Factor) } func (t *Term) String() string { out := []string{t.Left.String()} for _, r := range t.Right { out = append(out, r.String()) } return strings.Join(out, " ") } func (o *OpTerm) String() string { return fmt.Sprintf("%s %s", o.Operator, o.Term) } func (e *Expression) String() string { out := []string{e.Left.String()} for _, r := range e.Right { out = append(out, r.String()) } return strings.Join(out, " ") } // Evaluation func (o Operator) Eval(l, r float64) float64 { switch o { case OpMul: return l * r case OpDiv: return l / r case OpAdd: return l + r case OpSub: return l - r } panic("unsupported operator") } func (v *Value) Eval(ctx Context) float64 { switch { case v.Number != nil: return *v.Number case v.Variable != nil: value, ok := ctx[*v.Variable] if !ok { panic("no such variable " + *v.Variable) } return value default: return v.Subexpression.Eval(ctx) } } func (f *Factor) Eval(ctx Context) float64 { b := f.Base.Eval(ctx) if f.Exponent != nil { return math.Pow(b, f.Exponent.Eval(ctx)) } return b } func (t *Term) Eval(ctx Context) float64 { n := t.Left.Eval(ctx) for _, r := range t.Right { n = r.Operator.Eval(n, r.Factor.Eval(ctx)) } return n } func (e *Expression) Eval(ctx Context) float64 { l := e.Left.Eval(ctx) for _, r := range e.Right { l = r.Operator.Eval(l, r.Term.Eval(ctx)) } return l } type Context map[string]float64 var parser = participle.MustBuild[Expression]() func main() { ctx := kong.Parse(&cli, kong.Description("A basic 
expression parser and evaluator."), kong.UsageOnError(), ) expr, err := parser.ParseString("", strings.Join(cli.Expression, " ")) ctx.FatalIfErrorf(err) if cli.AST { json.NewEncoder(os.Stdout).Encode(expr) } else { fmt.Println(expr, "=", expr.Eval(cli.Set)) } } golang-github-alecthomas-participle-v2-2.1.4/_examples/expr/main_test.go000066400000000000000000000003671505300366400263170ustar00rootroot00000000000000package main import ( "testing" require "github.com/alecthomas/assert/v2" "github.com/alecthomas/repr" ) func TestExe(t *testing.T) { expr, err := parser.ParseString("", `1 + 2 / 3 * (1 + 2)`) repr.Println(expr) require.NoError(t, err) } golang-github-alecthomas-participle-v2-2.1.4/_examples/expr2/000077500000000000000000000000001505300366400240615ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/_examples/expr2/main.go000066400000000000000000000040701505300366400253350ustar00rootroot00000000000000package main import ( "strings" "github.com/alecthomas/kong" "github.com/alecthomas/repr" "github.com/alecthomas/participle/v2" ) // Based on http://www.craftinginterpreters.com/parsing-expressions.html // expression → equality ; // equality → comparison ( ( "!=" | "==" ) comparison )* ; // comparison → addition ( ( ">" | ">=" | "<" | "<=" ) addition )* ; // addition → multiplication ( ( "-" | "+" ) multiplication )* ; // multiplication → unary ( ( "/" | "*" ) unary )* ; // unary → ( "!" | "-" ) unary // | primary ; // primary → NUMBER | STRING | "false" | "true" | "nil" // | "(" expression ")" ; type Expression struct { Equality *Equality `@@` } type Equality struct { Comparison *Comparison `@@` Op string `( @( "!" "=" | "=" "=" )` Next *Equality ` @@ )*` } type Comparison struct { Addition *Addition `@@` Op string `( @( ">" | ">" "=" | "<" | "<" "=" )` Next *Comparison ` @@ )*` } type Addition struct { Multiplication *Multiplication `@@` Op string `( @( "-" | "+" )` Next *Addition ` @@ )*` } type Multiplication struct { Unary *Unary `@@` Op string `( @( "/" | "*" )` Next *Multiplication ` @@ )*` } type Unary struct { Op string ` ( @( "!" 
| "-" )` Unary *Unary ` @@ )` Primary *Primary `| @@` } type Primary struct { Number *float64 ` @Float | @Int` String *string `| @String` Bool *Boolean `| @( "true" | "false" )` Nil bool `| @"nil"` SubExpression *Expression `| "(" @@ ")" ` } type Boolean bool func (b *Boolean) Capture(values []string) error { *b = values[0] == "true" return nil } var parser = participle.MustBuild[Expression](participle.UseLookahead(2)) func main() { var cli struct { Expr []string `arg required help:"Expression to parse."` } ctx := kong.Parse(&cli) expr, err := parser.ParseString("", strings.Join(cli.Expr, " ")) ctx.FatalIfErrorf(err) repr.Println(expr) } golang-github-alecthomas-participle-v2-2.1.4/_examples/expr2/main_test.go000066400000000000000000000016161505300366400263770ustar00rootroot00000000000000package main import ( "testing" require "github.com/alecthomas/assert/v2" "github.com/alecthomas/repr" ) func TestExe(t *testing.T) { expr, err := parser.ParseString("", `1 + 2 / 3 * (1 + 2)`) repr.Println(expr) require.NoError(t, err) } func toPtr[T any](x T) *T { return &x } func TestExe_BoolFalse(t *testing.T) { got, err := parser.ParseString("", `1 + false`) expected := &Expression{ Equality: &Equality{ Comparison: &Comparison{ Addition: &Addition{ Multiplication: &Multiplication{ Unary: &Unary{ Primary: &Primary{ Number: toPtr(float64(1)), }, }, }, Op: "+", Next: &Addition{ Multiplication: &Multiplication{ Unary: &Unary{ Primary: &Primary{ Bool: toPtr(Boolean(false)), }, }, }, }, }, }, }, } require.NoError(t, err) require.Equal(t, expected, got) } golang-github-alecthomas-participle-v2-2.1.4/_examples/expr3/000077500000000000000000000000001505300366400240625ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/_examples/expr3/main.go000066400000000000000000000071761505300366400253500ustar00rootroot00000000000000package main import ( "strings" "github.com/alecthomas/kong" "github.com/alecthomas/participle/v2" "github.com/alecthomas/repr" ) type ( ExprString struct { Value string `@String` } ExprNumber struct { Value float64 `@Int | @Float` } ExprIdent struct { Name string `@Ident` } ExprParens struct { Inner ExprPrecAll `"(" @@ ")"` } ExprUnary struct { Op string `@("-" | "!")` Expr ExprOperand `@@` } ExprAddSub struct { Head ExprPrec2 `@@` Tail []ExprAddSubExt `@@+` } ExprAddSubExt struct { Op string `@("+" | "-")` Expr ExprPrec2 `@@` } ExprMulDiv struct { Head ExprPrec3 `@@` Tail []ExprMulDivExt `@@+` } ExprMulDivExt struct { Op string `@("*" | "/")` Expr ExprPrec3 `@@` } ExprRem struct { Head ExprOperand `@@` Tail []ExprRemExt `@@+` } ExprRemExt struct { Op string `@"%"` Expr ExprOperand `@@` } ExprPrecAll interface{ exprPrecAll() } ExprPrec2 interface{ exprPrec2() } ExprPrec3 interface{ exprPrec3() } ExprOperand interface{ exprOperand() } ) // These expression types can be matches as individual operands func (ExprIdent) exprOperand() {} func (ExprNumber) exprOperand() {} func (ExprString) exprOperand() {} func (ExprParens) exprOperand() {} func (ExprUnary) exprOperand() {} // These expression types can be matched at precedence level 3 func (ExprIdent) exprPrec3() {} func (ExprNumber) exprPrec3() {} func (ExprString) exprPrec3() {} func (ExprParens) exprPrec3() {} func (ExprUnary) exprPrec3() {} func (ExprRem) exprPrec3() {} // These expression types can be matched at precedence level 2 func (ExprIdent) exprPrec2() {} func (ExprNumber) exprPrec2() {} func (ExprString) exprPrec2() {} func (ExprParens) exprPrec2() {} func (ExprUnary) exprPrec2() {} func (ExprRem) exprPrec2() {} 
func (ExprMulDiv) exprPrec2() {} // These expression types can be matched at the minimum precedence level func (ExprIdent) exprPrecAll() {} func (ExprNumber) exprPrecAll() {} func (ExprString) exprPrecAll() {} func (ExprParens) exprPrecAll() {} func (ExprUnary) exprPrecAll() {} func (ExprRem) exprPrecAll() {} func (ExprMulDiv) exprPrecAll() {} func (ExprAddSub) exprPrecAll() {} type Expression struct { X ExprPrecAll `@@` } var parser = participle.MustBuild[Expression]( // This grammar requires enough lookahead to see the entire expression before // it can select the proper binary expression type - in other words, we only // know that `1 * 2 * 3 * 4` isn't the left-hand side of an addition or subtraction // expression until we know for sure that no `+` or `-` operator follows it participle.UseLookahead(99999), // Register the ExprOperand union so we can parse individual operands participle.Union[ExprOperand](ExprUnary{}, ExprIdent{}, ExprNumber{}, ExprString{}, ExprParens{}), // Register the ExprPrec3 union so we can parse expressions at precedence level 3 participle.Union[ExprPrec3](ExprRem{}, ExprUnary{}, ExprIdent{}, ExprNumber{}, ExprString{}, ExprParens{}), // Register the ExprPrec2 union so we can parse expressions at precedence level 2 participle.Union[ExprPrec2](ExprMulDiv{}, ExprRem{}, ExprUnary{}, ExprIdent{}, ExprNumber{}, ExprString{}, ExprParens{}), // Register the ExprPrecAll union so we can parse expressions at the minimum precedence level participle.Union[ExprPrecAll](ExprAddSub{}, ExprMulDiv{}, ExprRem{}, ExprUnary{}, ExprIdent{}, ExprNumber{}, ExprString{}, ExprParens{}), ) func main() { var cli struct { Expr []string `arg required help:"Expression to parse."` } ctx := kong.Parse(&cli) expr, err := parser.ParseString("", strings.Join(cli.Expr, " ")) ctx.FatalIfErrorf(err) repr.Println(expr) } golang-github-alecthomas-participle-v2-2.1.4/_examples/expr3/main_test.go000066400000000000000000000023041505300366400263730ustar00rootroot00000000000000package main import ( "testing" require "github.com/alecthomas/assert/v2" ) func TestExpressionParser(t *testing.T) { type testCase struct { src string expected ExprPrecAll } for _, c := range []testCase{ {`1`, ExprNumber{1}}, {`1.5`, ExprNumber{1.5}}, {`"a"`, ExprString{`"a"`}}, {`(1)`, ExprParens{ExprNumber{1}}}, {`1 + 1`, ExprAddSub{ExprNumber{1}, []ExprAddSubExt{{"+", ExprNumber{1}}}}}, {`1 - 1`, ExprAddSub{ExprNumber{1}, []ExprAddSubExt{{"-", ExprNumber{1}}}}}, {`1 * 1`, ExprMulDiv{ExprNumber{1}, []ExprMulDivExt{{"*", ExprNumber{1}}}}}, {`1 / 1`, ExprMulDiv{ExprNumber{1}, []ExprMulDivExt{{"/", ExprNumber{1}}}}}, {`1 % 1`, ExprRem{ExprNumber{1}, []ExprRemExt{{"%", ExprNumber{1}}}}}, { `a + b - c * d / e % f`, ExprAddSub{ ExprIdent{"a"}, []ExprAddSubExt{ {"+", ExprIdent{"b"}}, {"-", ExprMulDiv{ ExprIdent{"c"}, []ExprMulDivExt{ {"*", ExprIdent{Name: "d"}}, {"/", ExprRem{ ExprIdent{"e"}, []ExprRemExt{{"%", ExprIdent{"f"}}}, }}, }, }}, }, }, }, } { actual, err := parser.ParseString("", c.src) require.NoError(t, err) require.Equal(t, c.expected, actual.X) } } golang-github-alecthomas-participle-v2-2.1.4/_examples/expr4/000077500000000000000000000000001505300366400240635ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/_examples/expr4/main.go000066400000000000000000000053151505300366400253420ustar00rootroot00000000000000package main import ( "fmt" "strconv" "strings" "text/scanner" "github.com/alecthomas/kong" "github.com/alecthomas/participle/v2" "github.com/alecthomas/participle/v2/lexer" 
"github.com/alecthomas/repr" ) type operatorPrec struct{ Left, Right int } var operatorPrecs = map[string]operatorPrec{ "+": {1, 1}, "-": {1, 1}, "*": {3, 2}, "/": {5, 4}, "%": {7, 6}, } type ( Expr interface{ expr() } ExprIdent struct{ Name string } ExprString struct{ Value string } ExprNumber struct{ Value float64 } ExprParens struct{ Sub Expr } ExprUnary struct { Op string Sub Expr } ExprBinary struct { Lhs Expr Op string Rhs Expr } ) func (ExprIdent) expr() {} func (ExprString) expr() {} func (ExprNumber) expr() {} func (ExprParens) expr() {} func (ExprUnary) expr() {} func (ExprBinary) expr() {} func parseExprAny(lex *lexer.PeekingLexer) (Expr, error) { return parseExprPrec(lex, 0) } func parseExprAtom(lex *lexer.PeekingLexer) (Expr, error) { switch peek := lex.Peek(); { case peek.Type == scanner.Ident: return ExprIdent{lex.Next().Value}, nil case peek.Type == scanner.String: val, err := strconv.Unquote(lex.Next().Value) if err != nil { return nil, err } return ExprString{val}, nil case peek.Type == scanner.Int || peek.Type == scanner.Float: val, err := strconv.ParseFloat(lex.Next().Value, 64) if err != nil { return nil, err } return ExprNumber{val}, nil case peek.Value == "(": _ = lex.Next() inner, err := parseExprAny(lex) if err != nil { return nil, err } if lex.Peek().Value != ")" { return nil, fmt.Errorf("expected closing ')'") } _ = lex.Next() return ExprParens{inner}, nil default: return nil, participle.NextMatch } } func parseExprPrec(lex *lexer.PeekingLexer, minPrec int) (Expr, error) { var lhs Expr if peeked := lex.Peek(); peeked.Value == "-" || peeked.Value == "!" { op := lex.Next().Value atom, err := parseExprAtom(lex) if err != nil { return nil, err } lhs = ExprUnary{op, atom} } else { atom, err := parseExprAtom(lex) if err != nil { return nil, err } lhs = atom } for { peek := lex.Peek() prec, isOp := operatorPrecs[peek.Value] if !isOp || prec.Left < minPrec { break } op := lex.Next().Value rhs, err := parseExprPrec(lex, prec.Right) if err != nil { return nil, err } lhs = ExprBinary{lhs, op, rhs} } return lhs, nil } type Expression struct { X Expr `@@` } var parser = participle.MustBuild[Expression](participle.ParseTypeWith(parseExprAny)) func main() { var cli struct { Expr []string `arg required help:"Expression to parse."` } ctx := kong.Parse(&cli) expr, err := parser.ParseString("", strings.Join(cli.Expr, " ")) ctx.FatalIfErrorf(err) repr.Println(expr) } golang-github-alecthomas-participle-v2-2.1.4/_examples/expr4/main_test.go000066400000000000000000000027041505300366400264000ustar00rootroot00000000000000package main import ( "testing" require "github.com/alecthomas/assert/v2" ) func TestCustomExprParser(t *testing.T) { type testCase struct { src string expected Expr } for _, c := range []testCase{ {`1`, ExprNumber{1}}, {`1.5`, ExprNumber{1.5}}, {`"a"`, ExprString{"a"}}, {`(1)`, ExprParens{ExprNumber{1}}}, {`1+1`, ExprBinary{ExprNumber{1}, "+", ExprNumber{1}}}, {`1-1`, ExprBinary{ExprNumber{1}, "-", ExprNumber{1}}}, {`1*1`, ExprBinary{ExprNumber{1}, "*", ExprNumber{1}}}, {`1/1`, ExprBinary{ExprNumber{1}, "/", ExprNumber{1}}}, {`1%1`, ExprBinary{ExprNumber{1}, "%", ExprNumber{1}}}, {`a - -b`, ExprBinary{ExprIdent{"a"}, "-", ExprUnary{"-", ExprIdent{"b"}}}}, { `a + b - c * d / e % f`, ExprBinary{ ExprIdent{"a"}, "+", ExprBinary{ ExprIdent{"b"}, "-", ExprBinary{ ExprIdent{"c"}, "*", ExprBinary{ ExprIdent{"d"}, "/", ExprBinary{ ExprIdent{"e"}, "%", ExprIdent{"f"}, }, }, }, }, }, }, { `a * b + c * d`, ExprBinary{ ExprBinary{ExprIdent{"a"}, "*", ExprIdent{"b"}}, "+", 
ExprBinary{ExprIdent{"c"}, "*", ExprIdent{"d"}}, }, }, { `(a + b) * (c + d)`, ExprBinary{ ExprParens{ExprBinary{ExprIdent{"a"}, "+", ExprIdent{"b"}}}, "*", ExprParens{ExprBinary{ExprIdent{"c"}, "+", ExprIdent{"d"}}}, }, }, } { actual, err := parser.ParseString("", c.src) require.NoError(t, err) require.Equal(t, c.expected, actual.X) } } golang-github-alecthomas-participle-v2-2.1.4/_examples/generics/000077500000000000000000000000001505300366400246205ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/_examples/generics/main.go000066400000000000000000000020111505300366400260650ustar00rootroot00000000000000package main import ( "github.com/alecthomas/repr" "github.com/alecthomas/participle/v2" ) type Generic struct { Params []string `"<" (@Ident ","?)+ ">" (?= ("(" | ")" | "]" | ":" | ";" | "," | "." | "?" | "=" "=" | "!" "="))` } type Call struct { Params []*Expr `( @@ ","?)*` } type Terminal struct { Ident string ` @Ident` Number int `| @Int` Sub *Expr `| "(" @@ ")"` } type Expr struct { Terminal *Terminal `@@` Generic *Generic `( @@` RHS *RHS ` | @@ )?` Call *Call `( "(" @@ ")"` Reference *Expr ` | "." @@ )?` } type RHS struct { Oper string `@("<" | ">" | "=" "=" | "!" "=" | "+" | "-" | "*" | "/" | "&" "&")` RHS *Expr `@@` } var parser = participle.MustBuild[Expr](participle.UseLookahead(1024)) func main() { expr, err := parser.ParseString("", "hello < world * (1 + 3) && (world > 10)") if err != nil { panic(err) } repr.Println(expr) expr, err = parser.ParseString("", "type.method(1, 2, 3)") if err != nil { panic(err) } repr.Println(expr) } golang-github-alecthomas-participle-v2-2.1.4/_examples/go.mod000066400000000000000000000005731505300366400241340ustar00rootroot00000000000000module github.com/alecthomas/participle/v2/_examples go 1.18 require ( github.com/alecthomas/assert/v2 v2.11.0 github.com/alecthomas/go-thrift v0.0.3 github.com/alecthomas/kong v1.6.1 github.com/alecthomas/participle/v2 v2.1.1 github.com/alecthomas/repr v0.4.0 ) require github.com/hexops/gotextdiff v1.0.3 // indirect replace github.com/alecthomas/participle/v2 => ../ golang-github-alecthomas-participle-v2-2.1.4/_examples/go.sum000066400000000000000000000034401505300366400241550ustar00rootroot00000000000000github.com/alecthomas/assert/v2 v2.6.0 h1:o3WJwILtexrEUk3cUVal3oiQY2tfgr/FHWiz/v2n4FU= github.com/alecthomas/assert/v2 v2.6.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= github.com/alecthomas/assert/v2 v2.10.0 h1:jjRCHsj6hBJhkmhznrCzoNpbA3zqy0fYiUcYZP/GkPY= github.com/alecthomas/assert/v2 v2.10.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0= github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= github.com/alecthomas/go-thrift v0.0.0-20220915213326-b383ff0e9ca1 h1:1dmVFISCxlfv+qSa2ak7TkebZ8w4kTRCqb4Uoj9MG5U= github.com/alecthomas/go-thrift v0.0.0-20220915213326-b383ff0e9ca1/go.mod h1:8dI6rFLWpVn5UKQjYBQMzTAszkI5SDMGOy7iHYbR0sw= github.com/alecthomas/go-thrift v0.0.3 h1:wKTw+PCQQqOCt+6MCLxl+lFk1/aJ4AJVd4Iek3fibk8= github.com/alecthomas/go-thrift v0.0.3/go.mod h1:8dI6rFLWpVn5UKQjYBQMzTAszkI5SDMGOy7iHYbR0sw= github.com/alecthomas/kong v0.8.1 h1:acZdn3m4lLRobeh3Zi2S2EpnXTd1mOL6U7xVml+vfkY= github.com/alecthomas/kong v0.8.1/go.mod h1:n1iCIO2xS46oE8ZfYCNDqdR0b0wZNrXAIAqro/2132U= github.com/alecthomas/kong v1.2.1 h1:E8jH4Tsgv6wCRX2nGrdPyHDUCSG83WH2qE4XLACD33Q= github.com/alecthomas/kong v1.2.1/go.mod h1:rKTSFhbdp3Ryefn8x5MOEprnRFQ7nlmMC01GKhehhBM= 
github.com/alecthomas/kong v1.6.1 h1:/7bVimARU3uxPD0hbryPE8qWrS3Oz3kPQoxA/H2NKG8= github.com/alecthomas/kong v1.6.1/go.mod h1:p2vqieVMeTAnaC83txKtXe8FLke2X07aruPWXyMPQrU= github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc= github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= golang-github-alecthomas-participle-v2-2.1.4/_examples/graphql/000077500000000000000000000000001505300366400244575ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/_examples/graphql/example.graphql000066400000000000000000000017541505300366400275010ustar00rootroot00000000000000# A comment. type Tweet { id: ID! # The tweet text. No more than 140 characters! body: String # When the tweet was published date: Date # Who published the tweet Author: User # Views, retweets, likes, etc Stats: Stat } type User { id: ID! username: String first_name: String last_name: String full_name: String name: String @deprecated avatar_url: Url } type Stat { views: Int likes: Int retweets: Int responses: Int } type Notification { id: ID date: Date type: String } type Meta { count: Int } scalar Url scalar Date type Query { Tweet(id: ID!): Tweet Tweets(limit: Int, skip: Int, sort_field: String, sort_order: String): [Tweet] TweetsMeta: Meta User(id: ID!): User Notifications(limit: Int): [Notification] NotificationsMeta: Meta } type Mutation { createTweet ( body: String ): Tweet deleteTweet(id: ID!): Tweet markTweetRead(id: ID!): Boolean } golang-github-alecthomas-participle-v2-2.1.4/_examples/graphql/main.go000066400000000000000000000036471505300366400257440ustar00rootroot00000000000000package main import ( "fmt" "os" "github.com/alecthomas/kong" "github.com/alecthomas/repr" "github.com/alecthomas/participle/v2" "github.com/alecthomas/participle/v2/lexer" ) type File struct { Entries []*Entry `@@*` } type Entry struct { Type *Type ` @@` Schema *Schema `| @@` Enum *Enum `| @@` Scalar string `| "scalar" @Ident` } type Enum struct { Name string `"enum" @Ident` Cases []string `"{" @Ident* "}"` } type Schema struct { Fields []*Field `"schema" "{" @@* "}"` } type Type struct { Name string `"type" @Ident` Implements string `( "implements" @Ident )?` Fields []*Field `"{" @@* "}"` } type Field struct { Name string `@Ident` Arguments []*Argument `( "(" ( @@ ( "," @@ )* )? 
")" )?` Type *TypeRef `":" @@` Annotation string `( "@" @Ident )?` } type Argument struct { Name string `@Ident` Type *TypeRef `":" @@` Default *Value `( "=" @@ )?` } type TypeRef struct { Array *TypeRef `( "[" @@ "]"` Type string ` | @Ident )` NonNullable bool `@"!"?` } type Value struct { Symbol string `@Ident` } var ( graphQLLexer = lexer.MustSimple([]lexer.SimpleRule{ {"Comment", `(?:#|//)[^\n]*\n?`}, {"Ident", `[a-zA-Z]\w*`}, {"Number", `(?:\d*\.)?\d+`}, {"Punct", `[-[!@#$%^&*()+_={}\|:;"'<,>.?/]|]`}, {"Whitespace", `[ \t\n\r]+`}, }) parser = participle.MustBuild[File]( participle.Lexer(graphQLLexer), participle.Elide("Comment", "Whitespace"), participle.UseLookahead(2), ) ) var cli struct { EBNF bool `help"Dump EBNF."` Files []string `arg:"" optional:"" type:"existingfile" help:"GraphQL schema files to parse."` } func main() { ctx := kong.Parse(&cli) if cli.EBNF { fmt.Println(parser.String()) ctx.Exit(0) } for _, file := range cli.Files { r, err := os.Open(file) ctx.FatalIfErrorf(err) ast, err := parser.Parse("", r) r.Close() repr.Println(ast) ctx.FatalIfErrorf(err) } } golang-github-alecthomas-participle-v2-2.1.4/_examples/graphql/main_test.go000066400000000000000000000005321505300366400267710ustar00rootroot00000000000000package main import ( "io/ioutil" "testing" require "github.com/alecthomas/assert/v2" ) func BenchmarkParser(b *testing.B) { source, err := ioutil.ReadFile("example.graphql") require.NoError(b, err) b.ReportAllocs() b.ReportMetric(float64(len(source)*b.N), "B/s") for i := 0; i < b.N; i++ { _, _ = parser.ParseBytes("", source) } } golang-github-alecthomas-participle-v2-2.1.4/_examples/hcl/000077500000000000000000000000001505300366400235675ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/_examples/hcl/example.hcl000066400000000000000000000011721505300366400257130ustar00rootroot00000000000000region = "us-west-2" access_key = "something" secret_key = "something_else" bucket = "backups" directory config { source_dir = "/etc/eventstore" dest_prefix = "escluster/config" exclude = ["*.hcl"] pre_backup_script = "before_backup.sh" post_backup_script = "after_backup.sh" pre_restore_script = "before_restore.sh" post_restore_script = "after_restore.sh" chmod = 0755 } directory data { source_dir = "/var/lib/eventstore" dest_prefix = "escluster/a/data" exclude = [ "*.merging" ] pre_restore_script = "before_restore.sh" post_restore_script = "after_restore.sh" } golang-github-alecthomas-participle-v2-2.1.4/_examples/hcl/main.go000066400000000000000000000026421505300366400250460ustar00rootroot00000000000000// Package main implements a parser for HashiCorp's HCL configuration syntax. package main import ( "fmt" "os" "strings" "github.com/alecthomas/repr" "github.com/alecthomas/participle/v2" ) type Bool bool func (b *Bool) Capture(v []string) error { *b = v[0] == "true"; return nil } type Value struct { Boolean *Bool ` @("true"|"false")` Identifier *string `| @Ident ( @"." @Ident )*` String *string `| @(String|Char|RawString)` Number *float64 `| @(Float|Int)` Array []*Value `| "[" ( @@ ","? 
)* "]"` } func (l *Value) GoString() string { switch { case l.Boolean != nil: return fmt.Sprintf("%v", *l.Boolean) case l.Identifier != nil: return fmt.Sprintf("`%s`", *l.Identifier) case l.String != nil: return fmt.Sprintf("%q", *l.String) case l.Number != nil: return fmt.Sprintf("%v", *l.Number) case l.Array != nil: out := []string{} for _, v := range l.Array { out = append(out, v.GoString()) } return fmt.Sprintf("[]*Value{ %s }", strings.Join(out, ", ")) } panic("??") } type Entry struct { Key string `@Ident` Value *Value `( "=" @@` Block *Block ` | @@ )` } type Block struct { Parameters []*Value `@@*` Entries []*Entry `"{" @@* "}"` } type Config struct { Entries []*Entry `@@*` } var parser = participle.MustBuild[Config](participle.Unquote()) func main() { expr, err := parser.Parse("", os.Stdin) if err != nil { panic(err) } repr.Println(expr) } golang-github-alecthomas-participle-v2-2.1.4/_examples/hcl/main_test.go000066400000000000000000000015351505300366400261050ustar00rootroot00000000000000package main import ( "testing" require "github.com/alecthomas/assert/v2" "github.com/alecthomas/repr" ) func TestExe(t *testing.T) { ast, err := parser.ParseString("", ` region = "us-west-2" access_key = "something" secret_key = "something_else" bucket = "backups" directory config { source_dir = "/etc/eventstore" dest_prefix = "escluster/config" exclude = ["*.hcl"] pre_backup_script = "before_backup.sh" post_backup_script = "after_backup.sh" pre_restore_script = "before_restore.sh" post_restore_script = "after_restore.sh" chmod = 0755 } directory data { source_dir = "/var/lib/eventstore" dest_prefix = "escluster/a/data" exclude = [ "*.merging" ] pre_restore_script = "before_restore.sh" post_restore_script = "after_restore.sh" } `) repr.Println(ast) require.NoError(t, err) } golang-github-alecthomas-participle-v2-2.1.4/_examples/ini/000077500000000000000000000000001505300366400236005ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/_examples/ini/example.ini000066400000000000000000000001551505300366400257350ustar00rootroot00000000000000a = "a" b = 123 # A comment [numbers] a = 10.3 b = 20 ; Another comment [strings] a = "\"quoted\"" b = "b" golang-github-alecthomas-participle-v2-2.1.4/_examples/ini/main.go000066400000000000000000000023701505300366400250550ustar00rootroot00000000000000package main import ( "os" "github.com/alecthomas/repr" "github.com/alecthomas/participle/v2" "github.com/alecthomas/participle/v2/lexer" ) // A custom lexer for INI files. This illustrates a relatively complex Regexp lexer, as well // as use of the Unquote filter, which unquotes string tokens. 
var ( iniLexer = lexer.MustSimple([]lexer.SimpleRule{ {`Ident`, `[a-zA-Z][a-zA-Z_\d]*`}, {`String`, `"(?:\\.|[^"])*"`}, {`Float`, `\d+(?:\.\d+)?`}, {`Punct`, `[][=]`}, {"comment", `[#;][^\n]*`}, {"whitespace", `\s+`}, }) parser = participle.MustBuild[INI]( participle.Lexer(iniLexer), participle.Unquote("String"), participle.Union[Value](String{}, Number{}), ) ) type INI struct { Properties []*Property `@@*` Sections []*Section `@@*` } type Section struct { Identifier string `"[" @Ident "]"` Properties []*Property `@@*` } type Property struct { Key string `@Ident "="` Value Value `@@` } type Value interface{ value() } type String struct { String string `@String` } func (String) value() {} type Number struct { Number float64 `@Float` } func (Number) value() {} func main() { ini, err := parser.Parse("", os.Stdin) repr.Println(ini, repr.Indent(" "), repr.OmitEmpty(true)) if err != nil { panic(err) } } golang-github-alecthomas-participle-v2-2.1.4/_examples/ini/main_test.go000066400000000000000000000004071505300366400261130ustar00rootroot00000000000000package main import ( "testing" require "github.com/alecthomas/assert/v2" "github.com/alecthomas/repr" ) func TestExe(t *testing.T) { ini, err := parser.ParseString("", ` global = 1 [section] value = "str" `) require.NoError(t, err) repr.Println(ini) } golang-github-alecthomas-participle-v2-2.1.4/_examples/json/000077500000000000000000000000001505300366400237725ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/_examples/json/main.go000066400000000000000000000034311505300366400252460ustar00rootroot00000000000000// nolint: golint, dupl package main import ( "os" "github.com/alecthomas/kong" "github.com/alecthomas/participle/v2" "github.com/alecthomas/participle/v2/lexer" ) var ( jsonLexer = lexer.MustSimple([]lexer.SimpleRule{ {Name: "Comment", Pattern: `\/\/[^\n]*`}, {Name: "String", Pattern: `"(\\"|[^"])*"`}, {Name: "Number", Pattern: `[-+]?(\d*\.)?\d+`}, {Name: "Punct", Pattern: `[-[!@#$%^&*()+_={}\|:;"'<,>.?/]|]`}, {Name: "Null", Pattern: "null"}, {Name: "True", Pattern: "true"}, {Name: "False", Pattern: "false"}, {Name: "EOL", Pattern: `[\n\r]+`}, {Name: "Whitespace", Pattern: `[ \t]+`}, }) jsonParser = participle.MustBuild[Json]( participle.Lexer(jsonLexer), participle.Unquote("String"), participle.Elide("Whitespace", "EOL"), participle.UseLookahead(2), ) cli struct { File string `arg:"" type:"existingfile" help:"File to parse."` } ) // Parse a Json string. 
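//
// A minimal usage sketch (error handling elided):
//
//	json, err := Parse([]byte(`{"list": [1, 2]}`))
//	// json.Object.Pairs[0].Key == "list" (String tokens are unquoted)
//	// json.Object.Pairs[0].Value.Array.Items holds the two numbers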
func Parse(data []byte) (*Json, error) { json, err := jsonParser.ParseBytes("", data) if err != nil { return nil, err } return json, nil } type Json struct { Pos lexer.Position Object *Object `parser:"@@ |"` Array *Array `parser:"@@ |"` Number *string `parser:"@Number |"` String *string `parser:"@String |"` False *string `parser:"@False |"` True *string `parser:"@True |"` Null *string `parser:"@Null"` } type Object struct { Pos lexer.Position Pairs []*Pair `parser:"'{' @@ (',' @@)* '}'"` } type Pair struct { Pos lexer.Position Key string `parser:"@String ':'"` Value *Json `parser:"@@"` } type Array struct { Pos lexer.Position Items []*Json `parser:"'[' @@ (',' @@)* ']'"` } func main() { ctx := kong.Parse(&cli) data, err := os.ReadFile(cli.File) ctx.FatalIfErrorf(err) res, err := Parse(data) ctx.FatalIfErrorf(err) ctx.Printf("res is: %v", res) } golang-github-alecthomas-participle-v2-2.1.4/_examples/json/main_test.go000066400000000000000000000003661505300366400263110ustar00rootroot00000000000000package main import ( "os" "testing" require "github.com/alecthomas/assert/v2" ) func TestParse(t *testing.T) { src, err := os.ReadFile("./test.json") require.NoError(t, err) _, err = Parse(src) require.NoError(t, err) } golang-github-alecthomas-participle-v2-2.1.4/_examples/json/test.json000066400000000000000000000003461505300366400256470ustar00rootroot00000000000000{ "list": [1, 1.2, 1, -1, {"foo": "bar"}, true, false, null], "object": { "foo1": "bar2", "foo2": true, "foo3": false, "foo4": null, "foo5": 1, "foo6": "ss" } }golang-github-alecthomas-participle-v2-2.1.4/_examples/jsonpath/000077500000000000000000000000001505300366400246475ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/_examples/jsonpath/github-webhook.json000066400000000000000000000332341505300366400304650ustar00rootroot00000000000000{ "action": "created", "check_run": { "id": 128620228, "node_id": "MDg6Q2hlY2tSdW4xMjg2MjAyMjg=", "head_sha": "ec26c3e57ca3a959ca5aad62de7213c562f8c821", "external_id": "", "url": "https://api.github.com/repos/Codertocat/Hello-World/check-runs/128620228", "html_url": "https://github.com/Codertocat/Hello-World/runs/128620228", "details_url": "https://octocoders.io", "status": "queued", "conclusion": null, "started_at": "2019-05-15T15:21:12Z", "completed_at": null, "output": { "title": null, "summary": null, "text": null, "annotations_count": 0, "annotations_url": "https://api.github.com/repos/Codertocat/Hello-World/check-runs/128620228/annotations" }, "name": "Octocoders-linter", "check_suite": { "id": 118578147, "node_id": "MDEwOkNoZWNrU3VpdGUxMTg1NzgxNDc=", "head_branch": "changes", "head_sha": "ec26c3e57ca3a959ca5aad62de7213c562f8c821", "status": "queued", "conclusion": null, "url": "https://api.github.com/repos/Codertocat/Hello-World/check-suites/118578147", "before": "6113728f27ae82c7b1a177c8d03f9e96e0adf246", "after": "ec26c3e57ca3a959ca5aad62de7213c562f8c821", "pull_requests": [ { "url": "https://api.github.com/repos/Codertocat/Hello-World/pulls/2", "id": 279147437, "number": 2, "head": { "ref": "changes", "sha": "ec26c3e57ca3a959ca5aad62de7213c562f8c821", "repo": { "id": 186853002, "url": "https://api.github.com/repos/Codertocat/Hello-World", "name": "Hello-World" } }, "base": { "ref": "master", "sha": "f95f852bd8fca8fcc58a9a2d6c842781e32a215e", "repo": { "id": 186853002, "url": "https://api.github.com/repos/Codertocat/Hello-World", "name": "Hello-World" } } } ], "app": { "id": 29310, "node_id": "MDM6QXBwMjkzMTA=", "owner": { "login": "Octocoders", "id": 38302899, 
"node_id": "MDEyOk9yZ2FuaXphdGlvbjM4MzAyODk5", "avatar_url": "https://avatars1.githubusercontent.com/u/38302899?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Octocoders", "html_url": "https://github.com/Octocoders", "followers_url": "https://api.github.com/users/Octocoders/followers", "following_url": "https://api.github.com/users/Octocoders/following{/other_user}", "gists_url": "https://api.github.com/users/Octocoders/gists{/gist_id}", "starred_url": "https://api.github.com/users/Octocoders/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Octocoders/subscriptions", "organizations_url": "https://api.github.com/users/Octocoders/orgs", "repos_url": "https://api.github.com/users/Octocoders/repos", "events_url": "https://api.github.com/users/Octocoders/events{/privacy}", "received_events_url": "https://api.github.com/users/Octocoders/received_events", "type": "Organization", "site_admin": false }, "name": "octocoders-linter", "description": "", "external_url": "https://octocoders.io", "html_url": "https://github.com/apps/octocoders-linter", "created_at": "2019-04-19T19:36:24Z", "updated_at": "2019-04-19T19:36:56Z", "permissions": { "administration": "write", "checks": "write", "contents": "write", "deployments": "write", "issues": "write", "members": "write", "metadata": "read", "organization_administration": "write", "organization_hooks": "write", "organization_plan": "read", "organization_projects": "write", "organization_user_blocking": "write", "pages": "write", "pull_requests": "write", "repository_hooks": "write", "repository_projects": "write", "statuses": "write", "team_discussions": "write", "vulnerability_alerts": "read" }, "events": [] }, "created_at": "2019-05-15T15:20:31Z", "updated_at": "2019-05-15T15:20:31Z" }, "app": { "id": 29310, "node_id": "MDM6QXBwMjkzMTA=", "owner": { "login": "Octocoders", "id": 38302899, "node_id": "MDEyOk9yZ2FuaXphdGlvbjM4MzAyODk5", "avatar_url": "https://avatars1.githubusercontent.com/u/38302899?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Octocoders", "html_url": "https://github.com/Octocoders", "followers_url": "https://api.github.com/users/Octocoders/followers", "following_url": "https://api.github.com/users/Octocoders/following{/other_user}", "gists_url": "https://api.github.com/users/Octocoders/gists{/gist_id}", "starred_url": "https://api.github.com/users/Octocoders/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Octocoders/subscriptions", "organizations_url": "https://api.github.com/users/Octocoders/orgs", "repos_url": "https://api.github.com/users/Octocoders/repos", "events_url": "https://api.github.com/users/Octocoders/events{/privacy}", "received_events_url": "https://api.github.com/users/Octocoders/received_events", "type": "Organization", "site_admin": false }, "name": "octocoders-linter", "description": "", "external_url": "https://octocoders.io", "html_url": "https://github.com/apps/octocoders-linter", "created_at": "2019-04-19T19:36:24Z", "updated_at": "2019-04-19T19:36:56Z", "permissions": { "administration": "write", "checks": "write", "contents": "write", "deployments": "write", "issues": "write", "members": "write", "metadata": "read", "organization_administration": "write", "organization_hooks": "write", "organization_plan": "read", "organization_projects": "write", "organization_user_blocking": "write", "pages": "write", "pull_requests": "write", "repository_hooks": "write", "repository_projects": "write", "statuses": "write", 
"team_discussions": "write", "vulnerability_alerts": "read" }, "events": [] }, "pull_requests": [ { "url": "https://api.github.com/repos/Codertocat/Hello-World/pulls/2", "id": 279147437, "number": 2, "head": { "ref": "changes", "sha": "ec26c3e57ca3a959ca5aad62de7213c562f8c821", "repo": { "id": 186853002, "url": "https://api.github.com/repos/Codertocat/Hello-World", "name": "Hello-World" } }, "base": { "ref": "master", "sha": "f95f852bd8fca8fcc58a9a2d6c842781e32a215e", "repo": { "id": 186853002, "url": "https://api.github.com/repos/Codertocat/Hello-World", "name": "Hello-World" } } } ] }, "repository": { "id": 186853002, "node_id": "MDEwOlJlcG9zaXRvcnkxODY4NTMwMDI=", "name": "Hello-World", "full_name": "Codertocat/Hello-World", "private": false, "owner": { "login": "Codertocat", "id": 21031067, "node_id": "MDQ6VXNlcjIxMDMxMDY3", "avatar_url": "https://avatars1.githubusercontent.com/u/21031067?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Codertocat", "html_url": "https://github.com/Codertocat", "followers_url": "https://api.github.com/users/Codertocat/followers", "following_url": "https://api.github.com/users/Codertocat/following{/other_user}", "gists_url": "https://api.github.com/users/Codertocat/gists{/gist_id}", "starred_url": "https://api.github.com/users/Codertocat/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Codertocat/subscriptions", "organizations_url": "https://api.github.com/users/Codertocat/orgs", "repos_url": "https://api.github.com/users/Codertocat/repos", "events_url": "https://api.github.com/users/Codertocat/events{/privacy}", "received_events_url": "https://api.github.com/users/Codertocat/received_events", "type": "User", "site_admin": false }, "html_url": "https://github.com/Codertocat/Hello-World", "description": null, "fork": false, "url": "https://api.github.com/repos/Codertocat/Hello-World", "forks_url": "https://api.github.com/repos/Codertocat/Hello-World/forks", "keys_url": "https://api.github.com/repos/Codertocat/Hello-World/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/Codertocat/Hello-World/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/Codertocat/Hello-World/teams", "hooks_url": "https://api.github.com/repos/Codertocat/Hello-World/hooks", "issue_events_url": "https://api.github.com/repos/Codertocat/Hello-World/issues/events{/number}", "events_url": "https://api.github.com/repos/Codertocat/Hello-World/events", "assignees_url": "https://api.github.com/repos/Codertocat/Hello-World/assignees{/user}", "branches_url": "https://api.github.com/repos/Codertocat/Hello-World/branches{/branch}", "tags_url": "https://api.github.com/repos/Codertocat/Hello-World/tags", "blobs_url": "https://api.github.com/repos/Codertocat/Hello-World/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/Codertocat/Hello-World/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/Codertocat/Hello-World/git/refs{/sha}", "trees_url": "https://api.github.com/repos/Codertocat/Hello-World/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/Codertocat/Hello-World/statuses/{sha}", "languages_url": "https://api.github.com/repos/Codertocat/Hello-World/languages", "stargazers_url": "https://api.github.com/repos/Codertocat/Hello-World/stargazers", "contributors_url": "https://api.github.com/repos/Codertocat/Hello-World/contributors", "subscribers_url": "https://api.github.com/repos/Codertocat/Hello-World/subscribers", "subscription_url": 
"https://api.github.com/repos/Codertocat/Hello-World/subscription", "commits_url": "https://api.github.com/repos/Codertocat/Hello-World/commits{/sha}", "git_commits_url": "https://api.github.com/repos/Codertocat/Hello-World/git/commits{/sha}", "comments_url": "https://api.github.com/repos/Codertocat/Hello-World/comments{/number}", "issue_comment_url": "https://api.github.com/repos/Codertocat/Hello-World/issues/comments{/number}", "contents_url": "https://api.github.com/repos/Codertocat/Hello-World/contents/{+path}", "compare_url": "https://api.github.com/repos/Codertocat/Hello-World/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/Codertocat/Hello-World/merges", "archive_url": "https://api.github.com/repos/Codertocat/Hello-World/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/Codertocat/Hello-World/downloads", "issues_url": "https://api.github.com/repos/Codertocat/Hello-World/issues{/number}", "pulls_url": "https://api.github.com/repos/Codertocat/Hello-World/pulls{/number}", "milestones_url": "https://api.github.com/repos/Codertocat/Hello-World/milestones{/number}", "notifications_url": "https://api.github.com/repos/Codertocat/Hello-World/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/Codertocat/Hello-World/labels{/name}", "releases_url": "https://api.github.com/repos/Codertocat/Hello-World/releases{/id}", "deployments_url": "https://api.github.com/repos/Codertocat/Hello-World/deployments", "created_at": "2019-05-15T15:19:25Z", "updated_at": "2019-05-15T15:21:03Z", "pushed_at": "2019-05-15T15:20:57Z", "git_url": "git://github.com/Codertocat/Hello-World.git", "ssh_url": "git@github.com:Codertocat/Hello-World.git", "clone_url": "https://github.com/Codertocat/Hello-World.git", "svn_url": "https://github.com/Codertocat/Hello-World", "homepage": null, "size": 0, "stargazers_count": 0, "watchers_count": 0, "language": "Ruby", "has_issues": true, "has_projects": true, "has_downloads": true, "has_wiki": true, "has_pages": true, "forks_count": 1, "mirror_url": null, "archived": false, "disabled": false, "open_issues_count": 2, "license": null, "forks": 1, "open_issues": 2, "watchers": 0, "default_branch": "master" }, "sender": { "login": "Codertocat", "id": 21031067, "node_id": "MDQ6VXNlcjIxMDMxMDY3", "avatar_url": "https://avatars1.githubusercontent.com/u/21031067?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Codertocat", "html_url": "https://github.com/Codertocat", "followers_url": "https://api.github.com/users/Codertocat/followers", "following_url": "https://api.github.com/users/Codertocat/following{/other_user}", "gists_url": "https://api.github.com/users/Codertocat/gists{/gist_id}", "starred_url": "https://api.github.com/users/Codertocat/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Codertocat/subscriptions", "organizations_url": "https://api.github.com/users/Codertocat/orgs", "repos_url": "https://api.github.com/users/Codertocat/repos", "events_url": "https://api.github.com/users/Codertocat/events{/privacy}", "received_events_url": "https://api.github.com/users/Codertocat/received_events", "type": "User", "site_admin": false } } golang-github-alecthomas-participle-v2-2.1.4/_examples/jsonpath/main.go000066400000000000000000000044301505300366400261230ustar00rootroot00000000000000package main import ( "encoding/json" "fmt" "os" "github.com/alecthomas/participle/v2" ) type pathExpr struct { Parts []part `@@ ( "." 
@@ )*` } type part struct { Obj string `@Ident` Acc []acc `("[" @@ "]")*` } type acc struct { Name *string `@(String|Char|RawString)` Index *int `| @Int` } var parser = participle.MustBuild[pathExpr]() func main() { if len(os.Args) < 3 { fmt.Fprintf(os.Stderr, "Usage: %s \n", os.Args[0]) os.Exit(2) } q := os.Args[1] files := os.Args[2:] expr, err := parser.ParseString("", q) if err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } for _, file := range files { f, err := os.Open(file) if err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } var input map[string]interface{} if err := json.NewDecoder(f).Decode(&input); err != nil { f.Close() fmt.Fprintln(os.Stderr, err) os.Exit(1) } f.Close() result, err := match(input, expr) if err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } switch r := result.(type) { case map[string]interface{}: enc := json.NewEncoder(os.Stdout) enc.SetIndent("", " ") _ = enc.Encode(r) default: fmt.Printf("%v\n", r) } } } func match(input map[string]interface{}, expr *pathExpr) (interface{}, error) { var v interface{} = input for _, e := range expr.Parts { switch m := v.(type) { case map[string]interface{}: val, ok := m[e.Obj] if !ok { return nil, fmt.Errorf("not found: %q", e.Obj) } v = val for _, a := range e.Acc { if a.Name != nil { switch m := v.(type) { case map[string]interface{}: val, ok = m[*a.Name] if !ok { return nil, fmt.Errorf("not found: %q does not contain %q", e.Obj, *a.Name) } v = val default: return nil, fmt.Errorf("cannot access named index in %T", v) } } if a.Index != nil { switch s := v.(type) { case []interface{}: if len(s) <= *a.Index { return nil, fmt.Errorf("not found: %q contains only %d items", e.Obj, len(s)) } v = s[*a.Index] default: return nil, fmt.Errorf("cannot access numeric index in %T", v) } } } default: return nil, fmt.Errorf("cannot read %q, parent is not a map", e.Obj) } } return v, nil } golang-github-alecthomas-participle-v2-2.1.4/_examples/jsonpath/main_test.go000066400000000000000000000010601505300366400271600ustar00rootroot00000000000000package main import ( "encoding/json" "os" "testing" require "github.com/alecthomas/assert/v2" ) func TestExe(t *testing.T) { r, err := os.Open("github-webhook.json") require.NoError(t, err) input := map[string]interface{}{} err = json.NewDecoder(r).Decode(&input) require.NoError(t, err) ast, err := parser.ParseString(``, `check_run.check_suite.pull_requests[0].url`) require.NoError(t, err) result, err := match(input, ast) require.NoError(t, err) require.Equal(t, "https://api.github.com/repos/Codertocat/Hello-World/pulls/2", result) } golang-github-alecthomas-participle-v2-2.1.4/_examples/microc/000077500000000000000000000000001505300366400242755ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/_examples/microc/main.go000066400000000000000000000131561505300366400255560ustar00rootroot00000000000000package main import ( "github.com/alecthomas/repr" "github.com/alecthomas/participle/v2" "github.com/alecthomas/participle/v2/lexer" ) // https://www.it.uu.se/katalog/aleji304/CompilersProject/uc.html // // program ::= topdec_list // topdec_list ::= /empty/ | topdec topdec_list // topdec ::= vardec ";" // | funtype ident "(" formals ")" funbody // vardec ::= scalardec | arraydec // scalardec ::= typename ident // arraydec ::= typename ident "[" intconst "]" // typename ::= "int" | "char" // funtype ::= typename | "void" // funbody ::= "{" locals stmts "}" | ";" // formals ::= "void" | formal_list // formal_list ::= formaldec | formaldec "," 
formal_list // formaldec ::= scalardec | typename ident "[" "]" // locals ::= /empty/ | vardec ";" locals // stmts ::= /empty/ | stmt stmts // stmt ::= expr ";" // | "return" expr ";" | "return" ";" // | "while" condition stmt // | "if" condition stmt else_part // | "{" stmts "}" // | ";" // else_part ::= /empty/ | "else" stmt // condition ::= "(" expr ")" // expr ::= intconst // | ident | ident "[" expr "]" // | unop expr // | expr binop expr // | ident "(" actuals ")" // | "(" expr ")" // unop ::= "-" | "!" // binop ::= "+" | "-" | "*" | "/" // | "<" | ">" | "<=" | ">=" | "!=" | "==" // | "&&" // | "=" // actuals ::= /empty/ | expr_list // expr_list ::= expr | expr "," expr_list type Program struct { Pos lexer.Position TopDec []*TopDec `@@*` } type TopDec struct { Pos lexer.Position FunDec *FunDec ` @@` VarDec *VarDec `| @@ ";"` } type VarDec struct { Pos lexer.Position ArrayDec *ArrayDec ` @@` ScalarDec *ScalarDec `| @@` } type ScalarDec struct { Pos lexer.Position Type string `@Type` Name string `@Ident` } type ArrayDec struct { Pos lexer.Position Type string `@Type` Name string `@Ident` Size int `"[" @Int "]"` } type ReturnStmt struct { Pos lexer.Position Result *Expr `"return" @@?` } type WhileStmt struct { Pos lexer.Position Condition *Expr `"while" "(" @@ ")"` Body *Stmt `@@` } type IfStmt struct { Pos lexer.Position Condition *Expr `"if" "(" @@ ")"` Body *Stmt `@@` Else *Stmt `("else" @@)?` } type Stmts struct { Pos lexer.Position Stmts []*Stmt `@@*` } type Stmt struct { Pos lexer.Position IfStmt *IfStmt ` @@` ReturnStmt *ReturnStmt `| @@` WhileStmt *WhileStmt `| @@` Block *Stmts `| "{" @@ "}"` Expr *Expr `| @@` Empty bool `| @";"` } type FunBody struct { Pos lexer.Position Locals []*VarDec `(@@ ";")*` Stmts *Stmts `@@` } type FunDec struct { Pos lexer.Position ReturnType string `@(Type | "void")` Name string `@Ident` Parameters []*Parameter `"(" ((@@ ("," @@)*) | "void") ")"` FunBody *FunBody `(";" | "{" @@ "}")` } type Parameter struct { Pos lexer.Position Array *ArrayParameter ` @@` Scalar *ScalarDec `| @@` } type ArrayParameter struct { Pos lexer.Position Type string `@Type` Ident string `@Ident "[" "]"` } type Expr struct { Pos lexer.Position Assignment *Assignment `@@` } type Assignment struct { Pos lexer.Position Equality *Equality `@@` Op string `( @"="` Next *Equality ` @@ )?` } type Equality struct { Pos lexer.Position Comparison *Comparison `@@` Op string `[ @( "!" "=" | "=" "=" )` Next *Equality ` @@ ]` } type Comparison struct { Pos lexer.Position Addition *Addition `@@` Op string `[ @( ">" "=" | ">" | "<" "=" | "<" )` Next *Comparison ` @@ ]` } type Addition struct { Pos lexer.Position Multiplication *Multiplication `@@` Op string `[ @( "-" | "+" )` Next *Addition ` @@ ]` } type Multiplication struct { Pos lexer.Position Unary *Unary `@@` Op string `[ @( "/" | "*" )` Next *Multiplication ` @@ ]` } type Unary struct { Pos lexer.Position Op string ` ( @( "!" | "-" )` Unary *Unary ` @@ )` Primary *Primary `| @@` } type Primary struct { Pos lexer.Position Number *int ` @Int` ArrayIndex *ArrayIndex `| @@` CallFunc *CallFunc `| @@` Ident string `| @Ident` SubExpression *Expr `| "(" @@ ")" ` } type ArrayIndex struct { Pos lexer.Position Ident string `@Ident` Index []*Expr `("[" @@ "]")+` } type CallFunc struct { Pos lexer.Position Ident string `@Ident` Index []*Expr `"(" (@@ ("," @@)*)? 
")"` } var ( lex = lexer.MustSimple([]lexer.SimpleRule{ {"comment", `//.*|/\*.*?\*/`}, {"whitespace", `\s+`}, {"Type", `\b(int|char)\b`}, {"Ident", `\b([a-zA-Z_][a-zA-Z0-9_]*)\b`}, {"Punct", `[-,()*/+%{};&!=:<>]|\[|\]`}, {"Int", `\d+`}, }) parser = participle.MustBuild[Program]( participle.Lexer(lex), participle.UseLookahead(2)) ) const sample = ` /* This is an example uC program. */ void putint(int i); int fac(int n) { if (n < 2) return n; return n * fac(n - 1); } int sum(int n, int a[]) { int i; int s; i = 0; s = 0; while (i <= n) { s = s + a[i]; i = i + 1; } return s; } int main(void) { int a[2]; a[0] = fac(5); a[1] = 27; putint(sum(2, a)); // prints 147 return 0; } ` func main() { ast, err := parser.ParseString("", sample) repr.Println(ast) if err != nil { panic(err) } } golang-github-alecthomas-participle-v2-2.1.4/_examples/microc/main_test.go000066400000000000000000000007101505300366400266050ustar00rootroot00000000000000package main import ( "strings" "testing" require "github.com/alecthomas/assert/v2" "github.com/alecthomas/repr" ) func TestExe(t *testing.T) { program, err := parser.ParseString("", sample) require.NoError(t, err) repr.Println(program) } func BenchmarkParser(b *testing.B) { src := strings.Repeat(sample, 10) b.ReportAllocs() b.ReportMetric(float64(len(src)*b.N), "B/s") for i := 0; i < b.N; i++ { _, _ = parser.ParseString("", src) } } golang-github-alecthomas-participle-v2-2.1.4/_examples/precedenceclimbing/000077500000000000000000000000001505300366400266235ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/_examples/precedenceclimbing/main.go000066400000000000000000000051311505300366400300760ustar00rootroot00000000000000// Package main shows an example of how to add precedence climbing to a Participle parser. // // Precedence climbing is an approach to parsing expressions that efficiently // produces compact parse trees. // // In contrast, naive recursive descent expression parsers produce parse trees proportional in // complexity to the number of operators supported. This impacts both readability and // performance. 
// // It is based on https://eli.thegreenplace.net/2012/08/02/parsing-expressions-by-precedence-climbing package main import ( "fmt" "os" "strconv" "strings" "github.com/alecthomas/repr" "github.com/alecthomas/participle/v2" "github.com/alecthomas/participle/v2/lexer" ) type opInfo struct { RightAssociative bool Priority int } var info = map[string]opInfo{ "+": {Priority: 1}, "-": {Priority: 1}, "*": {Priority: 2}, "/": {Priority: 2}, "^": {RightAssociative: true, Priority: 3}, } type Expr struct { Terminal *int Left *Expr Op string Right *Expr } func (e *Expr) String() string { if e.Left != nil { return fmt.Sprintf("(%s %s %s)", e.Left, e.Op, e.Right) } return fmt.Sprintf("%d", *e.Terminal) } func (e *Expr) Parse(lex *lexer.PeekingLexer) error { *e = *parseExpr(lex, 0) return nil } // (1 + 2) * 3 func parseExpr(lex *lexer.PeekingLexer, minPrec int) *Expr { lhs := parseAtom(lex) for { tok := peek(lex) if tok.EOF() || !isOp(rune(tok.Type)) || info[tok.Value].Priority < minPrec { break } op := tok.Value nextMinPrec := info[op].Priority if !info[op].RightAssociative { nextMinPrec++ } lex.Next() rhs := parseExpr(lex, nextMinPrec) lhs = parseOp(op, lhs, rhs) } return lhs } func parseAtom(lex *lexer.PeekingLexer) *Expr { tok := peek(lex) if tok.Type == '(' { lex.Next() val := parseExpr(lex, 1) if peek(lex).Value != ")" { panic("unmatched (") } lex.Next() return val } else if tok.EOF() { panic("unexpected EOF") } else if isOp(rune(tok.Type)) { panic("expected a terminal not " + tok.String()) } else { lex.Next() n, err := strconv.ParseInt(tok.Value, 10, 64) if err != nil { panic("invalid number " + tok.Value) } in := int(n) return &Expr{Terminal: &in} } } func isOp(rn rune) bool { return strings.ContainsRune("+-*/^", rn) } func peek(lex *lexer.PeekingLexer) *lexer.Token { return lex.Peek() } func parseOp(op string, lhs *Expr, rhs *Expr) *Expr { return &Expr{ Op: op, Left: lhs, Right: rhs, } } var parser = participle.MustBuild[Expr]() func main() { e, err := parser.ParseString("", strings.Join(os.Args[1:], " ")) fmt.Println(e) repr.Println(e) if err != nil { panic(err) } } golang-github-alecthomas-participle-v2-2.1.4/_examples/precedenceclimbing/main_test.go000066400000000000000000000010031505300366400311270ustar00rootroot00000000000000package main import ( "testing" require "github.com/alecthomas/assert/v2" ) func TestExe(t *testing.T) { actual, err := parser.ParseString("", `1 + 2 - 3 * (4 + 2)`) require.NoError(t, err) expected := expr( expr(intp(1), "+", intp(2)), "-", expr(intp(3), "*", expr(intp(4), "+", intp(2)))) require.Equal(t, expected, actual) } func expr(l *Expr, op string, r *Expr) *Expr { return &Expr{Left: l, Op: op, Right: r} } func intp(n int) *Expr { return &Expr{Terminal: &n} } golang-github-alecthomas-participle-v2-2.1.4/_examples/protobuf/000077500000000000000000000000001505300366400246615ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/_examples/protobuf/example.proto000066400000000000000000000006121505300366400274000ustar00rootroot00000000000000syntax = "proto3"; package test.test; message SearchRequest { string query = 1; int32 page_number = 2; int32 result_per_page = 3; map scores = 4; message Foo {} enum Bar { FOO = 0; } } message SearchResponse { string results = 1; } enum Type { INT = 0; DOUBLE = 1; } service SearchService { rpc Search(SearchRequest) returns (SearchResponse); } golang-github-alecthomas-participle-v2-2.1.4/_examples/protobuf/main.go000066400000000000000000000126231505300366400261400ustar00rootroot00000000000000// nolint: govet, 
golint package main import ( "fmt" "os" "github.com/alecthomas/kong" "github.com/alecthomas/repr" "github.com/alecthomas/participle/v2" "github.com/alecthomas/participle/v2/lexer" ) type Proto struct { Pos lexer.Position Entries []*Entry `( @@ ";"* )*` } type Entry struct { Pos lexer.Position Syntax string ` "syntax" "=" @String` Package string `| "package" @(Ident ( "." Ident )*)` Import string `| "import" @String` Message *Message `| @@` Service *Service `| @@` Enum *Enum `| @@` Option *Option `| "option" @@` Extend *Extend `| @@` } type Option struct { Pos lexer.Position Name string `( "(" @Ident @( "." Ident )* ")" | @Ident @( "." @Ident )* )` Attr *string `( "." @Ident ( "." @Ident )* )?` Value *Value `"=" @@` } type Value struct { Pos lexer.Position String *string ` @String` Number *float64 `| @Float` Int *int64 `| @Int` Bool *bool `| (@"true" | "false")` Reference *string `| @Ident @( "." Ident )*` Map *Map `| @@` Array *Array `| @@` } type Array struct { Pos lexer.Position Elements []*Value `"[" ( @@ ( ","? @@ )* )? "]"` } type Map struct { Pos lexer.Position Entries []*MapEntry `"{" ( @@ ( ( "," )? @@ )* )? "}"` } type MapEntry struct { Pos lexer.Position Key *Value `@@` Value *Value `":"? @@` } type Extensions struct { Pos lexer.Position Extensions []Range `"extensions" @@ ( "," @@ )*` } type Reserved struct { Pos lexer.Position Reserved []Range `"reserved" @@ ( "," @@ )*` } type Range struct { Ident string ` @String` Start int `| ( @Int` End *int ` ( "to" ( @Int` Max bool ` | @"max" ) )? )` } type Extend struct { Pos lexer.Position Reference string `"extend" @Ident ( "." @Ident )*` Fields []*Field `"{" ( @@ ";"? )* "}"` } type Service struct { Pos lexer.Position Name string `"service" @Ident` Entry []*ServiceEntry `"{" ( @@ ";"? )* "}"` } type ServiceEntry struct { Pos lexer.Position Option *Option ` "option" @@` Method *Method `| @@` } type Method struct { Pos lexer.Position Name string `"rpc" @Ident` StreamingRequest bool `"(" @"stream"?` Request *Type ` @@ ")"` StreamingResponse bool `"returns" "(" @"stream"?` Response *Type ` @@ ")"` Options []*Option `( "{" ( "option" @@ ";" )* "}" )?` } type Enum struct { Pos lexer.Position Name string `"enum" @Ident` Values []*EnumEntry `"{" ( @@ ( ";" )* )* "}"` } type EnumEntry struct { Pos lexer.Position Value *EnumValue ` @@` Option *Option `| "option" @@` } type EnumValue struct { Pos lexer.Position Key string `@Ident` Value int `"=" @( [ "-" ] Int )` Options []*Option `( "[" @@ ( "," @@ )* "]" )?` } type Message struct { Pos lexer.Position Name string `"message" @Ident` Entries []*MessageEntry `"{" @@* "}"` } type MessageEntry struct { Pos lexer.Position Enum *Enum `( @@` Option *Option ` | "option" @@` Message *Message ` | @@` Oneof *Oneof ` | @@` Extend *Extend ` | @@` Reserved *Reserved ` | @@` Extensions *Extensions ` | @@` Field *Field ` | @@ ) ";"*` } type Oneof struct { Pos lexer.Position Name string `"oneof" @Ident` Entries []*OneofEntry `"{" ( @@ ";"* )* "}"` } type OneofEntry struct { Pos lexer.Position Field *Field ` @@` Option *Option `| "option" @@` } type Field struct { Pos lexer.Position Optional bool `( @"optional"` Required bool ` | @"required"` Repeated bool ` | @"repeated" )?` Type *Type `@@` Name string `@Ident` Tag int `"=" @Int` Options []*Option `( "[" @@ ( "," @@ )* "]" )?` } type Scalar int const ( None Scalar = iota Double Float Int32 Int64 Uint32 Uint64 Sint32 Sint64 Fixed32 Fixed64 SFixed32 SFixed64 Bool String Bytes ) var scalarToString = map[Scalar]string{ None: "None", Double: "Double", Float: "Float", 
Int32: "Int32", Int64: "Int64", Uint32: "Uint32", Uint64: "Uint64", Sint32: "Sint32", Sint64: "Sint64", Fixed32: "Fixed32", Fixed64: "Fixed64", SFixed32: "SFixed32", SFixed64: "SFixed64", Bool: "Bool", String: "String", Bytes: "Bytes", } func (s Scalar) GoString() string { return scalarToString[s] } var stringToScalar = map[string]Scalar{ "double": Double, "float": Float, "int32": Int32, "int64": Int64, "uint32": Uint32, "uint64": Uint64, "sint32": Sint32, "sint64": Sint64, "fixed32": Fixed32, "fixed64": Fixed64, "sfixed32": SFixed32, "sfixed64": SFixed64, "bool": Bool, "string": String, "bytes": Bytes, } func (s *Scalar) Parse(lex *lexer.PeekingLexer) error { token := lex.Peek() v, ok := stringToScalar[token.Value] if !ok { return participle.NextMatch } lex.Next() *s = v return nil } type Type struct { Pos lexer.Position Scalar Scalar ` @@` Map *MapType `| @@` Reference string `| @(Ident ( "." Ident )*)` } type MapType struct { Pos lexer.Position Key *Type `"map" "<" @@` Value *Type `"," @@ ">"` } var ( parser = participle.MustBuild[Proto](participle.UseLookahead(2)) cli struct { Files []string `required existingfile arg help:"Protobuf files."` } ) func main() { ctx := kong.Parse(&cli) for _, file := range cli.Files { fmt.Println(file) r, err := os.Open(file) ctx.FatalIfErrorf(err, "") proto, err := parser.Parse("", r) ctx.FatalIfErrorf(err, "") repr.Println(proto, repr.Hide[lexer.Position]()) } } golang-github-alecthomas-participle-v2-2.1.4/_examples/protobuf/main_test.go000066400000000000000000000010721505300366400271730ustar00rootroot00000000000000package main import ( "testing" require "github.com/alecthomas/assert/v2" ) func TestExe(t *testing.T) { _, err := parser.ParseString("", ` syntax = "proto3"; package test.test; message SearchRequest { string query = 1; int32 page_number = 2; int32 result_per_page = 3; map scores = 4; message Foo {} enum Bar { FOO = 0; } } message SearchResponse { string results = 1; } enum Type { INT = 0; DOUBLE = 1; } service SearchService { rpc Search(SearchRequest) returns (SearchResponse); } `) require.NoError(t, err) } golang-github-alecthomas-participle-v2-2.1.4/_examples/simpleexpr/000077500000000000000000000000001505300366400252115ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/_examples/simpleexpr/main.go000066400000000000000000000022311505300366400264620ustar00rootroot00000000000000package main import ( "github.com/alecthomas/kong" "github.com/alecthomas/repr" "github.com/alecthomas/participle/v2" ) type Expr struct { Lhs *Value `@@` Tail []*Oper `@@*` } type Oper struct { Op string `@( "|" "|" | "&" "&" | "!" "=" | ("!"|"="|"<"|">") "="? | "+" | "-" | "/" | "*" )` Rhs *Value `@@` } type Value struct { Number *float64 ` @Float | @Int` String *string `| @String` Bool *string `| ( @"true" | "false" )` Nil bool `| @"nil"` SubExpression *Expr `| "(" @@ ")" ` } var ( cli struct { Expr string `arg:"" help:"Expression."` } parser = participle.MustBuild[Expr]() ) func main() { kctx := kong.Parse(&cli, kong.Description(` A simple expression parser that does not capture precedence at all. Precedence must be applied at the evaluation phase. The advantage of this approach over expr1, which does encode precedence in the parser, is that it is significantly less complex and less nested. The advantage of this over the "precedenceclimbing" example is that no custom parsing is required. 
`)) expr, err := parser.ParseString("", cli.Expr) kctx.FatalIfErrorf(err) repr.Println(expr) } golang-github-alecthomas-participle-v2-2.1.4/_examples/simpleexpr/main_test.go000066400000000000000000000003671505300366400275310ustar00rootroot00000000000000package main import ( "testing" require "github.com/alecthomas/assert/v2" "github.com/alecthomas/repr" ) func TestExe(t *testing.T) { expr, err := parser.ParseString("", `1 + 2 / 3 * (1 + 2)`) repr.Println(expr) require.NoError(t, err) } golang-github-alecthomas-participle-v2-2.1.4/_examples/sql/000077500000000000000000000000001505300366400236205ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/_examples/sql/main.go000066400000000000000000000105361505300366400251000ustar00rootroot00000000000000// nolint: govet package main import ( "github.com/alecthomas/kong" "github.com/alecthomas/repr" "github.com/alecthomas/participle/v2" "github.com/alecthomas/participle/v2/lexer" ) type Boolean bool func (b *Boolean) Capture(values []string) error { *b = values[0] == "TRUE" return nil } // Select based on http://www.h2database.com/html/grammar.html type Select struct { Top *Term `"SELECT" ( "TOP" @@ )?` Distinct bool `( @"DISTINCT"` All bool ` | @"ALL" )?` Expression *SelectExpression `@@` From *From `"FROM" @@` Limit *Expression `( "LIMIT" @@ )?` Offset *Expression `( "OFFSET" @@ )?` GroupBy *Expression `( "GROUP" "BY" @@ )?` } type From struct { TableExpressions []*TableExpression `@@ ( "," @@ )*` Where *Expression `( "WHERE" @@ )?` } type TableExpression struct { Table string `( @Ident ( "." @Ident )*` Select *Select ` | "(" @@ ")"` Values []*Expression ` | "VALUES" "(" @@ ( "," @@ )* ")")` As string `( "AS" @Ident )?` } type SelectExpression struct { All bool ` @"*"` Expressions []*AliasedExpression `| @@ ( "," @@ )*` } type AliasedExpression struct { Expression *Expression `@@` As string `( "AS" @Ident )?` } type Expression struct { Or []*OrCondition `@@ ( "OR" @@ )*` } type OrCondition struct { And []*Condition `@@ ( "AND" @@ )*` } type Condition struct { Operand *ConditionOperand ` @@` Not *Condition `| "NOT" @@` Exists *Select `| "EXISTS" "(" @@ ")"` } type ConditionOperand struct { Operand *Operand `@@` ConditionRHS *ConditionRHS `@@?` } type ConditionRHS struct { Compare *Compare ` @@` Is *Is `| "IS" @@` Between *Between `| "BETWEEN" @@` In *In `| "IN" "(" @@ ")"` Like *Like `| "LIKE" @@` } type Compare struct { Operator string `@( "<>" | "<=" | ">=" | "=" | "<" | ">" | "!=" )` Operand *Operand `( @@` Select *CompareSelect ` | @@ )` } type CompareSelect struct { All bool `( @"ALL"` Any bool ` | @"ANY"` Some bool ` | @"SOME" )` Select *Select `"(" @@ ")"` } type Like struct { Not bool `[ @"NOT" ]` Operand *Operand `@@` } type Is struct { Not bool `[ @"NOT" ]` Null bool `( @"NULL"` DistinctFrom *Operand ` | "DISTINCT" "FROM" @@ )` } type Between struct { Start *Operand `@@` End *Operand `"AND" @@` } type In struct { Select *Select ` @@` Expressions []*Expression `| @@ ( "," @@ )*` } type Operand struct { Summand []*Summand `@@ ( "|" "|" @@ )*` } type Summand struct { LHS *Factor `@@` Op string `[ @("+" | "-")` RHS *Factor ` @@ ]` } type Factor struct { LHS *Term `@@` Op string `( @("*" | "/" | "%")` RHS *Term ` @@ )?` } type Term struct { Select *Select ` @@` Value *Value `| @@` SymbolRef *SymbolRef `| @@` SubExpression *Expression `| "(" @@ ")"` } type SymbolRef struct { Symbol string `@Ident @( "." 
Ident )*` Parameters []*Expression `( "(" @@ ( "," @@ )* ")" )?` } type Value struct { Wildcard bool `( @"*"` Number *float64 ` | @Number` String *string ` | @String` Boolean *Boolean ` | @("TRUE" | "FALSE")` Null bool ` | @"NULL"` Array *Array ` | @@ )` } type Array struct { Expressions []*Expression `"(" @@ ( "," @@ )* ")"` } var ( cli struct { SQL string `arg:"" required:"" help:"SQL to parse."` } sqlLexer = lexer.MustSimple([]lexer.SimpleRule{ {`Keyword`, `(?i)\b(SELECT|FROM|TOP|DISTINCT|ALL|WHERE|GROUP|BY|HAVING|UNION|MINUS|EXCEPT|INTERSECT|ORDER|LIMIT|OFFSET|TRUE|FALSE|NULL|IS|NOT|ANY|SOME|BETWEEN|AND|OR|LIKE|AS|IN)\b`}, {`Ident`, `[a-zA-Z_][a-zA-Z0-9_]*`}, {`Number`, `[-+]?\d*\.?\d+([eE][-+]?\d+)?`}, {`String`, `'[^']*'|"[^"]*"`}, {`Operators`, `<>|!=|<=|>=|[-+*/%,.()=<>]`}, {"whitespace", `\s+`}, }) parser = participle.MustBuild[Select]( participle.Lexer(sqlLexer), participle.Unquote("String"), participle.CaseInsensitive("Keyword"), // participle.Elide("Comment"), // Need to solve left recursion detection first, if possible. // participle.UseLookahead(), ) ) func main() { ctx := kong.Parse(&cli) sql, err := parser.ParseString("", cli.SQL) repr.Println(sql, repr.Indent(" "), repr.OmitEmpty(true)) ctx.FatalIfErrorf(err) } golang-github-alecthomas-participle-v2-2.1.4/_examples/sql/main_test.go000066400000000000000000000004051505300366400261310ustar00rootroot00000000000000package main import ( "testing" require "github.com/alecthomas/assert/v2" "github.com/alecthomas/repr" ) func TestExe(t *testing.T) { sel, err := parser.ParseString("", `SELECT * FROM table WHERE attr = 10`) require.NoError(t, err) repr.Println(sel) } golang-github-alecthomas-participle-v2-2.1.4/_examples/stateful/000077500000000000000000000000001505300366400246505ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/_examples/stateful/main.go000066400000000000000000000022371505300366400261270ustar00rootroot00000000000000package main import ( "log" "github.com/alecthomas/repr" "github.com/alecthomas/participle/v2" "github.com/alecthomas/participle/v2/lexer" ) type Terminal struct { String *String ` @@` Ident string `| @Ident` } type Expr struct { Left *Terminal `@@` Op string `( @Oper` Right *Terminal ` @@)?` } type Fragment struct { Escaped string `( @Escaped` Expr *Expr ` | "${" @@ "}"` Text string ` | @Char)` } type String struct { Fragments []*Fragment `"\"" @@* "\""` } var ( def = lexer.MustStateful(lexer.Rules{ "Root": { {`String`, `"`, lexer.Push("String")}, }, "String": { {"Escaped", `\\.`, nil}, {"StringEnd", `"`, lexer.Pop()}, {"Expr", `\${`, lexer.Push("Expr")}, {"Char", `\$|[^$"\\]+`, nil}, }, "Expr": { lexer.Include("Root"), {`Whitespace`, `\s+`, nil}, {`Oper`, `[-+/*%]`, nil}, {"Ident", `\w+`, nil}, {"ExprEnd", `}`, lexer.Pop()}, }, }) parser = participle.MustBuild[String](participle.Lexer(def), participle.Elide("Whitespace")) ) func main() { actual, err := parser.ParseString("", `"hello $(world) ${first + "${last}"}"`) repr.Println(actual) if err != nil { log.Fatal(err) } } golang-github-alecthomas-participle-v2-2.1.4/_examples/stateful/main_test.go000066400000000000000000000003651505300366400271660ustar00rootroot00000000000000package main import ( "log" "testing" "github.com/alecthomas/repr" ) func TestExe(t *testing.T) { actual, err := parser.ParseString("", `"hello $(world) ${first + "${last}"}"`) if err != nil { log.Fatal(err) } repr.Println(actual) } 
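Editor's note: the simpleexpr example above states that precedence must be applied at the evaluation phase. The following is a hedged, self-contained sketch of one way to do that over the same flat head-plus-tail AST shape; it is not part of the upstream tree, all names are the editor's own, and it assumes operands have already been reduced to numbers.

package simpleexprsketch

import "fmt"

// oper mirrors the shape of the example's Oper: an operator token and its
// right-hand operand.
type oper struct {
	op  string
	rhs float64
}

// prec assigns binding power; operators not listed bind loosest (0).
var prec = map[string]int{"*": 2, "/": 2, "+": 1, "-": 1}

// eval walks the flat tail once, keeping value and operator stacks, and
// reduces whenever the operator on top binds at least as tightly as the
// incoming one -- a standard way to recover precedence after parsing.
func eval(lhs float64, tail []oper) (float64, error) {
	vals := []float64{lhs}
	var ops []string
	for _, t := range tail {
		for len(ops) > 0 && prec[ops[len(ops)-1]] >= prec[t.op] {
			if err := reduce(&vals, &ops); err != nil {
				return 0, err
			}
		}
		ops = append(ops, t.op)
		vals = append(vals, t.rhs)
	}
	for len(ops) > 0 {
		if err := reduce(&vals, &ops); err != nil {
			return 0, err
		}
	}
	return vals[0], nil
}

// reduce pops one operator and its two operands and pushes the result.
func reduce(vals *[]float64, ops *[]string) error {
	v, o := *vals, *ops
	a, b, op := v[len(v)-2], v[len(v)-1], o[len(o)-1]
	var r float64
	switch op {
	case "+":
		r = a + b
	case "-":
		r = a - b
	case "*":
		r = a * b
	case "/":
		if b == 0 {
			return fmt.Errorf("division by zero")
		}
		r = a / b
	default:
		return fmt.Errorf("unsupported operator %q", op)
	}
	*vals = append(v[:len(v)-2], r)
	*ops = o[:len(o)-1]
	return nil
}

Under these assumptions, eval(1, []oper{{"+", 2}, {"*", 3}}) returns 7: the grammar parsed the expression as a flat list, and the multiplication was reduced first here, at evaluation time.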
golang-github-alecthomas-participle-v2-2.1.4/_examples/thrift/000077500000000000000000000000001505300366400243215ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/_examples/thrift/lexer_gen.go000066400000000000000000000155371505300366400266330ustar00rootroot00000000000000// Code generated by Participle. DO NOT EDIT. package main import ( "io" "regexp/syntax" "strings" "unicode/utf8" "github.com/alecthomas/participle/v2" "github.com/alecthomas/participle/v2/lexer" ) var _ syntax.Op var Lexer lexer.Definition = definitionImpl{} type definitionImpl struct{} func (definitionImpl) Symbols() map[string]lexer.TokenType { return map[string]lexer.TokenType{ "Comment": -7, "EOF": -1, "Ident": -3, "Number": -2, "Punct": -6, "String": -4, "Whitespace": -5, } } func (definitionImpl) LexString(filename string, s string) (lexer.Lexer, error) { return &lexerImpl{ s: s, pos: lexer.Position{ Filename: filename, Line: 1, Column: 1, }, states: []lexerState{{name: "Root"}}, }, nil } func (d definitionImpl) LexBytes(filename string, b []byte) (lexer.Lexer, error) { return d.LexString(filename, string(b)) } func (d definitionImpl) Lex(filename string, r io.Reader) (lexer.Lexer, error) { s := &strings.Builder{} _, err := io.Copy(s, r) if err != nil { return nil, err } return d.LexString(filename, s.String()) } type lexerState struct { name string groups []string } type lexerImpl struct { s string p int pos lexer.Position states []lexerState } func (l *lexerImpl) Next() (lexer.Token, error) { if l.p == len(l.s) { return lexer.EOFToken(l.pos), nil } var ( state = l.states[len(l.states)-1] groups []int sym lexer.TokenType ) switch state.name { case "Root": if match := matchNumber(l.s, l.p); match[1] != 0 { sym = -2 groups = match[:] } else if match := matchIdent(l.s, l.p); match[1] != 0 { sym = -3 groups = match[:] } else if match := matchString(l.s, l.p); match[1] != 0 { sym = -4 groups = match[:] } else if match := matchWhitespace(l.s, l.p); match[1] != 0 { sym = -5 groups = match[:] } else if match := matchPunct(l.s, l.p); match[1] != 0 { sym = -6 groups = match[:] } else if match := matchComment(l.s, l.p); match[1] != 0 { sym = -7 groups = match[:] } } if groups == nil { sample := []rune(l.s[l.p:]) if len(sample) > 16 { sample = append(sample[:16], []rune("...")...) 
} return lexer.Token{}, participle.Errorf(l.pos, "invalid input text %q", sample) } pos := l.pos span := l.s[groups[0]:groups[1]] l.p = groups[1] l.pos.Advance(span) return lexer.Token{ Type: sym, Value: span, Pos: pos, }, nil } func (l *lexerImpl) sgroups(match []int) []string { sgroups := make([]string, len(match)/2) for i := 0; i < len(match)-1; i += 2 { sgroups[i/2] = l.s[l.p+match[i] : l.p+match[i+1]] } return sgroups } // [0-9]+ func matchNumber(s string, p int) (groups [2]int) { // [0-9] (CharClass) l0 := func(s string, p int) int { if len(s) <= p { return -1 } rn := s[p] switch { case rn >= '0' && rn <= '9': return p + 1 } return -1 } // [0-9]+ (Plus) l1 := func(s string, p int) int { if p = l0(s, p); p == -1 { return -1 } for len(s) > p { if np := l0(s, p); np == -1 { return p } else { p = np } } return p } np := l1(s, p) if np == -1 { return } groups[0] = p groups[1] = np return } // [0-9A-Z_a-z]+ func matchIdent(s string, p int) (groups [2]int) { // [0-9A-Z_a-z] (CharClass) l0 := func(s string, p int) int { if len(s) <= p { return -1 } rn := s[p] switch { case rn >= '0' && rn <= '9': return p + 1 case rn >= 'A' && rn <= 'Z': return p + 1 case rn == '_': return p + 1 case rn >= 'a' && rn <= 'z': return p + 1 } return -1 } // [0-9A-Z_a-z]+ (Plus) l1 := func(s string, p int) int { if p = l0(s, p); p == -1 { return -1 } for len(s) > p { if np := l0(s, p); np == -1 { return p } else { p = np } } return p } np := l1(s, p) if np == -1 { return } groups[0] = p groups[1] = np return } // "[^"]*" func matchString(s string, p int) (groups [2]int) { // " (Literal) l0 := func(s string, p int) int { if p < len(s) && s[p] == '"' { return p + 1 } return -1 } // [^"] (CharClass) l1 := func(s string, p int) int { if len(s) <= p { return -1 } var ( rn rune n int ) if s[p] < utf8.RuneSelf { rn, n = rune(s[p]), 1 } else { rn, n = utf8.DecodeRuneInString(s[p:]) } switch { case rn >= '\x00' && rn <= '!': return p + 1 case rn >= '#' && rn <= '\U0010ffff': return p + n } return -1 } // [^"]* (Star) l2 := func(s string, p int) int { for len(s) > p { if np := l1(s, p); np == -1 { return p } else { p = np } } return p } // "[^"]*" (Concat) l3 := func(s string, p int) int { if p = l0(s, p); p == -1 { return -1 } if p = l2(s, p); p == -1 { return -1 } if p = l0(s, p); p == -1 { return -1 } return p } np := l3(s, p) if np == -1 { return } groups[0] = p groups[1] = np return } // [\t-\n\f-\r ]+ func matchWhitespace(s string, p int) (groups [2]int) { // [\t-\n\f-\r ] (CharClass) l0 := func(s string, p int) int { if len(s) <= p { return -1 } rn := s[p] switch { case rn >= '\t' && rn <= '\n': return p + 1 case rn >= '\f' && rn <= '\r': return p + 1 case rn == ' ': return p + 1 } return -1 } // [\t-\n\f-\r ]+ (Plus) l1 := func(s string, p int) int { if p = l0(s, p); p == -1 { return -1 } for len(s) > p { if np := l0(s, p); np == -1 { return p } else { p = np } } return p } np := l1(s, p) if np == -1 { return } groups[0] = p groups[1] = np return } // [\(-\),\.:<->\{\}] func matchPunct(s string, p int) (groups [2]int) { // [\(-\),\.:<->\{\}] (CharClass) l0 := func(s string, p int) int { if len(s) <= p { return -1 } rn := s[p] switch { case rn >= '(' && rn <= ')': return p + 1 case rn == ',': return p + 1 case rn == '.': return p + 1 case rn == ':': return p + 1 case rn >= '<' && rn <= '>': return p + 1 case rn == '{': return p + 1 case rn == '}': return p + 1 } return -1 } np := l0(s, p) if np == -1 { return } groups[0] = p groups[1] = np return } // //(?-s:.)* func matchComment(s string, p int) (groups [2]int) { 
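// Editor's note: as with the other match* functions in this generated file,
// the matcher below hand-compiles the regular expression //(?-s:.)* into one
// closure per regexp/syntax node -- l0 matches the literal "//", l1 matches
// any rune except newline, l2 applies l1 zero or more times, and l3
// concatenates l0 and l2 -- returning the end offset of the match or -1.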
// // (Literal) l0 := func(s string, p int) int { if p+2 < len(s) && s[p:p+2] == "//" { return p + 2 } return -1 } // (?-s:.) (AnyCharNotNL) l1 := func(s string, p int) int { var ( rn rune n int ) if s[p] < utf8.RuneSelf { rn, n = rune(s[p]), 1 } else { rn, n = utf8.DecodeRuneInString(s[p:]) } if len(s) <= p+n || rn == '\n' { return -1 } return p + n } // (?-s:.)* (Star) l2 := func(s string, p int) int { for len(s) > p { if np := l1(s, p); np == -1 { return p } else { p = np } } return p } // //(?-s:.)* (Concat) l3 := func(s string, p int) int { if p = l0(s, p); p == -1 { return -1 } if p = l2(s, p); p == -1 { return -1 } return p } np := l3(s, p) if np == -1 { return } groups[0] = p groups[1] = np return } golang-github-alecthomas-participle-v2-2.1.4/_examples/thrift/main.go000066400000000000000000000126101505300366400255740ustar00rootroot00000000000000// Package main implements a parser for Thrift files (https://thrift.apache.org/) // // It parses namespaces, exceptions, services, structs, consts, typedefs and enums, but is easily // extensible to more. // // It also supports annotations and method throws. package main import ( "fmt" "os" "strings" "github.com/alecthomas/kong" "github.com/alecthomas/repr" "github.com/alecthomas/participle/v2" "github.com/alecthomas/participle/v2/lexer" ) type Namespace struct { Pos lexer.Position Language string `"namespace" @Ident` Namespace string `@Ident ( @"." @Ident )*` } type Type struct { Pos lexer.Position Name string `@Ident ( @"." @Ident )*` TypeOne *Type `( "<" @@ ( ","` TypeTwo *Type ` @@ )? ">" )?` } type Annotation struct { Pos lexer.Position Key string `@Ident ( @"." @Ident )*` Value *Literal `( "=" @@ )?` } type Field struct { Pos lexer.Position ID string `@Number ":"` Requirement string `@( "optional" | "required" )?` Type *Type `@@` Name string `@Ident` Default *Literal `( "=" @@ )?` Annotations []*Annotation `( "(" @@ ( "," @@ )* ")" )? ";"?` } type Exception struct { Pos lexer.Position Name string `"exception" @Ident "{"` Fields []*Field `@@ @@* "}"` Annotations []*Annotation `( "(" @@ ( "," @@ )* ")" )?` } type Struct struct { Pos lexer.Position Union bool `( "struct" | @"union" )` Name string `@Ident "{"` Fields []*Field `@@* "}"` Annotations []*Annotation `( "(" @@ ( "," @@ )* ")" )?` } type Argument struct { Pos lexer.Position ID string `@Number ":"` Type *Type `@@` Name string `@Ident` } type Throw struct { Pos lexer.Position ID string `@Number ":"` Type *Type `@@` Name string `@Ident` } type Method struct { Pos lexer.Position ReturnType *Type `@@` Name string `@Ident` Arguments []*Argument `"(" ( @@ ( "," @@ )* )? ")"` Throws []*Throw `( "throws" "(" @@ ( "," @@ )* ")" )?` Annotations []*Annotation `( "(" @@ ( "," @@ )* ")" )?` } type Service struct { Pos lexer.Position Name string `"service" @Ident` Extends string `( "extends" @Ident ( @"." @Ident )* )?` Methods []*Method `"{" ( @@ ";"? )* "}"` Annotations []*Annotation `( "(" @@ ( "," @@ )* ")" )?` } // Literal is a "union" type, where only one matching value will be present. type Literal struct { Pos lexer.Position Str *string ` @String` Number *float64 `| @Number` Bool *string `| @( "true" | "false" )` Reference *string `| @Ident ( @"." @Ident )*` Minus *Literal `| "-" @@` List []*Literal `| "[" ( @@ ","? )* "]"` Map []*MapItem `| "{" ( @@ ","? 
)* "}"` } func (l *Literal) GoString() string { switch { case l.Str != nil: return fmt.Sprintf("%q", *l.Str) case l.Number != nil: return fmt.Sprintf("%v", *l.Number) case l.Bool != nil: return fmt.Sprintf("%v", *l.Bool) case l.Reference != nil: return fmt.Sprintf("%s", *l.Reference) case l.Minus != nil: return fmt.Sprintf("-%v", l.Minus) case l.List != nil: parts := []string{} for _, e := range l.List { parts = append(parts, e.GoString()) } return fmt.Sprintf("[%s]", strings.Join(parts, ", ")) case l.Map != nil: parts := []string{} for _, e := range l.Map { parts = append(parts, e.GoString()) } return fmt.Sprintf("{%s}", strings.Join(parts, ", ")) } panic("unsupported?") } type MapItem struct { Pos lexer.Position Key *Literal `@@ ":"` Value *Literal `@@` } func (m *MapItem) GoString() string { return fmt.Sprintf("%v: %v", m.Key, m.Value) } type Case struct { Pos lexer.Position Name string `@Ident` Annotations []*Annotation `( "(" @@ ( "," @@ )* ")" )?` Value *Literal `( "=" @@ )? ( "," | ";" )?` } type Enum struct { Pos lexer.Position Name string `"enum" @Ident "{"` Cases []*Case `@@* "}"` Annotations []*Annotation `( "(" @@ ( "," @@ )* ")" )?` } type Typedef struct { Pos lexer.Position Type *Type `"typedef" @@` Name string `@Ident` } type Const struct { Pos lexer.Position Type *Type `"const" @@` Name string `@Ident` Value *Literal `"=" @@ ";"?` } type Entry struct { Pos lexer.Position Includes []string ` "include" @String` Namespaces []*Namespace `| @@` Structs []*Struct `| @@` Exceptions []*Exception `| @@` Services []*Service `| @@` Enums []*Enum `| @@` Typedefs []*Typedef `| @@` Consts []*Const `| @@` } // Thrift files consist of a set of top-level directives and definitions. // // The grammar type Thrift struct { Pos lexer.Position Entries []*Entry `@@*` } var ( def = lexer.MustSimple([]lexer.SimpleRule{ {"Number", `\d+`}, {"Ident", `\w+`}, {"String", `"[^"]*"`}, {"Whitespace", `\s+`}, {"Punct", `[,.<>(){}=:]`}, {"Comment", `//.*`}, }) parser = participle.MustBuild[Thrift]( participle.Lexer(def), participle.Unquote(), participle.Elide("Whitespace"), ) ) func main() { var cli struct { Gen bool `help:"Generate lexer."` Files []string `help:"Thrift files."` } ctx := kong.Parse(&cli) for _, file := range cli.Files { r, err := os.Open(file) ctx.FatalIfErrorf(err, "") thrift, err := parser.Parse("", r) ctx.FatalIfErrorf(err, "") repr.Println(thrift) } } golang-github-alecthomas-participle-v2-2.1.4/_examples/thrift/main_test.go000066400000000000000000000043411505300366400266350ustar00rootroot00000000000000package main import ( "strings" "testing" "time" require "github.com/alecthomas/assert/v2" thriftparser "github.com/alecthomas/go-thrift/parser" "github.com/alecthomas/participle/v2" ) var ( source = strings.TrimSpace(` namespace cpp thrift.example namespace java thrift.example enum TweetType { TWEET RETWEET = 2 DM = 3 REPLY } struct Location { 1: required double latitude 2: required double longitude } struct Tweet { 1: required i32 userId 2: required string userName 3: required string text 4: optional Location loc 5: optional TweetType tweetType = TweetType.TWEET 16: optional string language = "english" } typedef list<Tweet> TweetList struct TweetSearchResult { 1: TweetList tweets } exception TwitterUnavailable { 1: string message } const i32 MAX_RESULTS = 100 service Twitter { void ping() bool postTweet(1:Tweet tweet) throws (1:TwitterUnavailable unavailable) TweetSearchResult searchTweets(1:string query) void zip() } `) ) func BenchmarkParticipleThrift(b *testing.B) { _, err :=
parser.ParseString("", source) require.NoError(b, err) b.ResetTimer() b.ReportAllocs() start := time.Now() for i := 0; i < b.N; i++ { _, _ = parser.ParseString("", source) } b.ReportMetric(float64(len(source)*b.N)*float64(time.Since(start)/time.Second)/1024/1024, "MiB/s") } func BenchmarkParticipleThriftGenerated(b *testing.B) { parser := participle.MustBuild[Thrift]( participle.Lexer(Lexer), participle.Unquote(), participle.Elide("Whitespace"), ) _, err := parser.ParseString("", source) require.NoError(b, err) b.ResetTimer() b.ReportAllocs() start := time.Now() for i := 0; i < b.N; i++ { _, _ = parser.ParseString("", source) } b.ReportMetric(float64(len(source)*b.N)*float64(time.Since(start)/time.Second)/1024/1024, "MiB/s") } func BenchmarkGoThriftParser(b *testing.B) { _, err := thriftparser.ParseReader("user.thrift", strings.NewReader(source)) require.NoError(b, err) b.ResetTimer() b.ReportAllocs() start := time.Now() for i := 0; i < b.N; i++ { _, _ = thriftparser.ParseReader("user.thrift", strings.NewReader(source)) } b.ReportMetric(float64(len(source)*b.N)*float64(time.Since(start)/time.Second)/1024/1024, "MiB/s") } golang-github-alecthomas-participle-v2-2.1.4/_examples/toml/000077500000000000000000000000001505300366400237745ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/_examples/toml/example.toml000066400000000000000000000010611505300366400263220ustar00rootroot00000000000000# This is a TOML document. title = "TOML Example" [owner] name = "Tom Preston-Werner" dob = 1979-05-27T07:32:00-08:00 # First class dates [database] server = "192.168.1.1" ports = [ 8001, 8001, 8002 ] connection_max = 5000 enabled = true enabled = false [servers] # Indentation (tabs and/or spaces) is allowed but not required [servers.alpha] ip = "10.0.0.1" dc = "eqdc10" [servers.beta] ip = "10.0.0.2" dc = "eqdc10" [clients] data = [ ["gamma", "delta"], [1, 2] ] # Line breaks are OK when inside arrays hosts = [ "alpha", "omega" ] golang-github-alecthomas-participle-v2-2.1.4/_examples/toml/main.go000066400000000000000000000027611505300366400252550ustar00rootroot00000000000000package main import ( "os" "github.com/alecthomas/kong" "github.com/alecthomas/repr" "github.com/alecthomas/participle/v2" "github.com/alecthomas/participle/v2/lexer" ) type TOML struct { Pos lexer.Position Entries []*Entry `@@*` } type Entry struct { Field *Field ` @@` Section *Section `| @@` } type Field struct { Key string `@Ident "="` Value *Value `@@` } type Value struct { String *string ` @String` DateTime *string `| @DateTime` Date *string `| @Date` Time *string `| @Time` Bool *bool `| (@"true" | "false")` Number *float64 `| @Number` List []*Value `| "[" ( @@ ( "," @@ )* )? "]"` } type Section struct { Name string `"[" @(Ident ( "." Ident )*) "]"` Fields []*Field `@@*` } var ( tomlLexer = lexer.MustSimple([]lexer.SimpleRule{ {"DateTime", `\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d+)?(-\d\d:\d\d)?`}, {"Date", `\d\d\d\d-\d\d-\d\d`}, {"Time", `\d\d:\d\d:\d\d(\.\d+)?`}, {"Ident", `[a-zA-Z_][a-zA-Z_0-9]*`}, {"String", `"[^"]*"`}, {"Number", `[-+]?[.0-9]+\b`}, {"Punct", `\[|]|[-!()+/*=,]`}, {"comment", `#[^\n]+`}, {"whitespace", `\s+`}, }) tomlParser = participle.MustBuild[TOML]( participle.Lexer(tomlLexer), participle.Unquote("String"), ) cli struct { File string `help:"TOML file to parse." 
arg:""` } ) func main() { ctx := kong.Parse(&cli) r, err := os.Open(cli.File) ctx.FatalIfErrorf(err) defer r.Close() toml, err := tomlParser.Parse(cli.File, r) ctx.FatalIfErrorf(err) repr.Println(toml) } golang-github-alecthomas-participle-v2-2.1.4/_examples/toml/main_test.go000066400000000000000000000014321505300366400263060ustar00rootroot00000000000000package main import ( "testing" require "github.com/alecthomas/assert/v2" "github.com/alecthomas/repr" ) func TestExe(t *testing.T) { toml, err := tomlParser.ParseString("", ` # This is a TOML document. title = "TOML Example" [owner] name = "Tom Preston-Werner" dob = 1979-05-27T07:32:00-08:00 # First class dates [database] server = "192.168.1.1" ports = [ 8001, 8001, 8002 ] connection_max = 5000 enabled = true enabled = false [servers] # Indentation (tabs and/or spaces) is allowed but not required [servers.alpha] ip = "10.0.0.1" dc = "eqdc10" [servers.beta] ip = "10.0.0.2" dc = "eqdc10" [clients] data = [ ["gamma", "delta"], [1, 2] ] # Line breaks are OK when inside arrays hosts = [ "alpha", "omega" ] `) require.NoError(t, err) repr.Println(toml) } golang-github-alecthomas-participle-v2-2.1.4/api.go000066400000000000000000000010711505300366400221430ustar00rootroot00000000000000package participle import ( "github.com/alecthomas/participle/v2/lexer" ) // Capture can be implemented by fields in order to transform captured tokens into field values. type Capture interface { Capture(values []string) error } // The Parseable interface can be implemented by any element in the grammar to provide custom parsing. type Parseable interface { // Parse into the receiver. // // Should return NextMatch if no tokens matched and parsing should continue. // Nil should be returned if parsing was successful. Parse(lex *lexer.PeekingLexer) error } golang-github-alecthomas-participle-v2-2.1.4/bin/000077500000000000000000000000001505300366400216145ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/bin/.go-1.23.5.pkg000077700000000000000000000000001505300366400247342hermitustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/bin/.golangci-lint-1.63.4.pkg000077700000000000000000000000001505300366400270612hermitustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/bin/.goreleaser-1.26.2.pkg000077700000000000000000000000001505300366400264572hermitustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/bin/README.hermit.md000066400000000000000000000004161505300366400243630ustar00rootroot00000000000000# Hermit environment This is a [Hermit](https://github.com/cashapp/hermit) bin directory. The symlinks in this directory are managed by Hermit and will automatically download and install Hermit itself as well as packages. These packages are local to this environment. golang-github-alecthomas-participle-v2-2.1.4/bin/activate-hermit000077500000000000000000000010621505300366400246270ustar00rootroot00000000000000#!/bin/bash # This file must be used with "source bin/activate-hermit" from bash or zsh. 
# You cannot run it directly if [ "${BASH_SOURCE-}" = "$0" ]; then echo "You must source this script: \$ source $0" >&2 exit 33 fi BIN_DIR="$(dirname "${BASH_SOURCE[0]:-${(%):-%x}}")" if "${BIN_DIR}/hermit" noop > /dev/null; then eval "$("${BIN_DIR}/hermit" activate "${BIN_DIR}/..")" if [ -n "${BASH-}" ] || [ -n "${ZSH_VERSION-}" ]; then hash -r 2>/dev/null fi echo "Hermit environment $("${HERMIT_ENV}"/bin/hermit env HERMIT_ENV) activated" fi golang-github-alecthomas-participle-v2-2.1.4/bin/go000077700000000000000000000000001505300366400240512.go-1.23.5.pkgustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/bin/gofmt000077700000000000000000000000001505300366400245602.go-1.23.5.pkgustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/bin/golangci-lint000077700000000000000000000000001505300366400303202.golangci-lint-1.63.4.pkgustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/bin/goreleaser000077700000000000000000000000001505300366400273172.goreleaser-1.26.2.pkgustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/bin/hermit000077500000000000000000000013611505300366400230330ustar00rootroot00000000000000#!/bin/bash set -eo pipefail if [ -z "${HERMIT_STATE_DIR}" ]; then case "$(uname -s)" in Darwin) export HERMIT_STATE_DIR="${HOME}/Library/Caches/hermit" ;; Linux) export HERMIT_STATE_DIR="${XDG_CACHE_HOME:-${HOME}/.cache}/hermit" ;; esac fi export HERMIT_DIST_URL="${HERMIT_DIST_URL:-https://github.com/cashapp/hermit/releases/download/stable}" HERMIT_CHANNEL="$(basename "${HERMIT_DIST_URL}")" export HERMIT_CHANNEL export HERMIT_EXE=${HERMIT_EXE:-${HERMIT_STATE_DIR}/pkg/hermit@${HERMIT_CHANNEL}/hermit} if [ ! -x "${HERMIT_EXE}" ]; then echo "Bootstrapping ${HERMIT_EXE} from ${HERMIT_DIST_URL}" 1>&2 curl -fsSL "${HERMIT_DIST_URL}/install.sh" | /bin/bash 1>&2 fi exec "${HERMIT_EXE}" --level=fatal exec "$0" -- "$@" golang-github-alecthomas-participle-v2-2.1.4/bin/hermit.hcl000066400000000000000000000000651505300366400235750ustar00rootroot00000000000000env = { "PATH": "${HERMIT_ENV}/scripts:${PATH}", } golang-github-alecthomas-participle-v2-2.1.4/cmd/000077500000000000000000000000001505300366400216075ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/cmd/participle/000077500000000000000000000000001505300366400237435ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/cmd/participle/codegen.go.tmpl000066400000000000000000000060311505300366400266510ustar00rootroot00000000000000// Code generated by Participle. DO NOT EDIT. 
{{if .Tags}}//go:build {{.Tags}} {{end -}} package {{.Package}} import ( "fmt" "io" "strings" "sync" "unicode/utf8" "regexp/syntax" "github.com/alecthomas/participle/v2" "github.com/alecthomas/participle/v2/lexer" ) var _ syntax.Op var _ fmt.State const _ = utf8.RuneError var {{.Name}}BackRefCache sync.Map var {{.Name}}Lexer lexer.Definition = lexer{{.Name}}DefinitionImpl{} type lexer{{.Name}}DefinitionImpl struct {} func (lexer{{.Name}}DefinitionImpl) Symbols() map[string]lexer.TokenType { return map[string]lexer.TokenType{ {{- range $sym, $rn := .Def.Symbols}} "{{$sym}}": {{$rn}}, {{- end}} } } func (lexer{{.Name}}DefinitionImpl) LexString(filename string, s string) (lexer.Lexer, error) { return &lexer{{.Name}}Impl{ s: s, pos: lexer.Position{ Filename: filename, Line: 1, Column: 1, }, states: []lexer{{.Name}}State{ {name: "Root"} }, }, nil } func (d lexer{{.Name}}DefinitionImpl) LexBytes(filename string, b []byte) (lexer.Lexer, error) { return d.LexString(filename, string(b)) } func (d lexer{{.Name}}DefinitionImpl) Lex(filename string, r io.Reader) (lexer.Lexer, error) { s := &strings.Builder{} _, err := io.Copy(s, r) if err != nil { return nil, err } return d.LexString(filename, s.String()) } type lexer{{.Name}}State struct { name string groups []string } type lexer{{.Name}}Impl struct { s string p int pos lexer.Position states []lexer{{.Name}}State } func (l *lexer{{.Name}}Impl) Next() (lexer.Token, error) { if l.p == len(l.s) { return lexer.EOFToken(l.pos), nil } var ( state = l.states[len(l.states)-1] groups []int sym lexer.TokenType ) switch state.name { {{- range $state := .Def.Rules|OrderRules}} case "{{$state.Name}}": {{- range $i, $rule := $state.Rules}} {{- if $i}} else {{end -}} {{- if .Pattern -}} if match := match{{$.Name}}{{.Name}}(l.s, l.p, l.states[len(l.states)-1].groups); match[1] != 0 { sym = {{index $.Def.Symbols .Name}} groups = match[:] {{- else if .|IsReturn -}} if true { {{- end}} {{- if .|IsPush}} l.states = append(l.states, lexer{{$.Name}}State{name: "{{.|IsPush}}"{{if HaveBackrefs $.Def $state.Name}}, groups: l.sgroups(groups){{end}}}) {{- else if (or (.|IsPop) (.|IsReturn))}} l.states = l.states[:len(l.states)-1] {{- if .|IsReturn}} return l.Next() {{- end}} {{- else if not .Action}} {{- else}} Unsupported action {{.Action}} {{- end}} } {{- end}} {{- end}} } if groups == nil { sample := []rune(l.s[l.p:]) if len(sample) > 16 { sample = append(sample[:16], []rune("...")...) } return lexer.Token{}, participle.Errorf(l.pos, "invalid input text %q", string(sample)) } pos := l.pos span := l.s[groups[0]:groups[1]] l.p = groups[1] l.pos.Advance(span) return lexer.Token{ Type: sym, Value: span, Pos: pos, }, nil } func (l *lexer{{.Name}}Impl) sgroups(match []int) []string { sgroups := make([]string, len(match)/2) for i := 0; i < len(match)-1; i += 2 { sgroups[i/2] = l.s[l.p+match[i]:l.p+match[i+1]] } return sgroups }golang-github-alecthomas-participle-v2-2.1.4/cmd/participle/gen_lexer_cmd.go000066400000000000000000000277711505300366400271030ustar00rootroot00000000000000package main import ( _ "embed" // For go:embed. 
"encoding/json" "fmt" "io" "os" "regexp" "regexp/syntax" "sort" "text/template" "unicode" "unicode/utf8" "github.com/alecthomas/participle/v2/lexer" ) type genLexerCmd struct { Name string `help:"Name of the lexer."` Output string `short:"o" help:"Output file."` Tags string `help:"Build tags to include in the generated file."` Package string `arg:"" required:"" help:"Go package for generated code."` Lexer *os.File `arg:"" default:"-" help:"JSON representation of a Participle lexer (read from stdin if omitted)."` } func (c *genLexerCmd) Help() string { return ` Generates Go code implementing the given JSON representation of a lexer. The generated code should in general by around 10x faster and produce zero garbage per token. ` } func (c *genLexerCmd) Run() error { rules := lexer.Rules{} err := json.NewDecoder(c.Lexer).Decode(&rules) if err != nil { return err } def, err := lexer.New(rules) if err != nil { return err } out := os.Stdout if c.Output != "" { out, err = os.Create(c.Output) if err != nil { return err } defer out.Close() } err = generateLexer(out, c.Package, def, c.Name, c.Tags) if err != nil { return err } return nil } var ( //go:embed codegen.go.tmpl codegenTemplateSource string codegenBackrefRe = regexp.MustCompile(`(\\+)(\d)`) codegenTemplate = template.Must(template.New("lexgen").Funcs(template.FuncMap{ "IsPush": func(r lexer.Rule) string { if p, ok := r.Action.(lexer.ActionPush); ok { return p.State } return "" }, "IsPop": func(r lexer.Rule) bool { _, ok := r.Action.(lexer.ActionPop) return ok }, "IsReturn": func(r lexer.Rule) bool { return r == lexer.ReturnRule }, "OrderRules": orderRules, "HaveBackrefs": func(def *lexer.StatefulDefinition, state string) bool { for _, rule := range def.Rules()[state] { if codegenBackrefRe.MatchString(rule.Pattern) { return true } } return false }, }).Parse(codegenTemplateSource)) ) func generateLexer(w io.Writer, pkg string, def *lexer.StatefulDefinition, name, tags string) error { type ctx struct { Package string Name string Tags string Def *lexer.StatefulDefinition } rules := def.Rules() err := codegenTemplate.Execute(w, ctx{pkg, name, tags, def}) if err != nil { return err } seen := map[string]bool{} // Rules can be duplicated by Include(). 
for _, rules := range orderRules(rules) { for _, rule := range rules.Rules { if rule.Name == "" { panic(rule) } if seen[rule.Name] { continue } seen[rule.Name] = true fmt.Fprintf(w, "\n") err := generateRegexMatch(w, name, rule.Name, rule.Pattern) if err != nil { return err } } } return nil } type orderedRule struct { Name string Rules []lexer.Rule } func orderRules(rules lexer.Rules) []orderedRule { orderedRules := []orderedRule{} for name, rules := range rules { orderedRules = append(orderedRules, orderedRule{ Name: name, Rules: rules, }) } sort.Slice(orderedRules, func(i, j int) bool { return orderedRules[i].Name < orderedRules[j].Name }) return orderedRules } func generateRegexMatch(w io.Writer, lexerName, name, pattern string) error { if codegenBackrefRe.FindStringIndex(pattern) != nil { fmt.Fprintf(w, "func match%s%s(s string, p int, backrefs []string) (groups []int) {\n", lexerName, name) fmt.Fprintf(w, " re, err := lexer.BackrefRegex(%sBackRefCache, %q, backrefs)\n", lexerName, pattern) fmt.Fprintf(w, " if err != nil { panic(fmt.Sprintf(\"%%s: %%s\", err, backrefs)) }\n") fmt.Fprintf(w, " return re.FindStringSubmatchIndex(s[p:])\n") fmt.Fprintf(w, "}\n") return nil } re, err := syntax.Parse(pattern, syntax.Perl) if err != nil { return err } ids := map[string]int{} idn := 0 reid := func(re *syntax.Regexp) int { key := re.Op.String() + ":" + re.String() id, ok := ids[key] if ok { return id } id = idn idn++ ids[key] = id return id } exists := func(re *syntax.Regexp) bool { key := re.Op.String() + ":" + re.String() _, ok := ids[key] return ok } re = re.Simplify() fmt.Fprintf(w, "// %s\n", re) fmt.Fprintf(w, "func match%s%s(s string, p int, backrefs []string) (groups [%d]int) {\n", lexerName, name, 2*re.MaxCap()+2) flattened := flatten(re) // Fast-path a single literal. 
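// Editor's note: when the entire pattern reduces to a single literal, the
// generated matcher is a lone length-guarded string comparison (using
// strings.EqualFold for case-insensitive patterns) instead of a chain of
// per-node closures, which is what the branch below emits.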
if len(flattened) == 1 && re.Op == syntax.OpLiteral { n := utf8.RuneCountInString(string(re.Rune)) if re.Flags&syntax.FoldCase != 0 { fmt.Fprintf(w, "if p+%d <= len(s) && strings.EqualFold(s[p:p+%d], %q) {\n", n, n, string(re.Rune)) } else { if n == 1 { fmt.Fprintf(w, "if p < len(s) && s[p] == %q {\n", re.Rune[0]) } else { fmt.Fprintf(w, "if p+%d <= len(s) && s[p:p+%d] == %q {\n", n, n, string(re.Rune)) } } fmt.Fprintf(w, "groups[0] = p\n") fmt.Fprintf(w, "groups[1] = p + %d\n", n) fmt.Fprintf(w, "}\n") fmt.Fprintf(w, "return\n") fmt.Fprintf(w, "}\n") return nil } for _, re := range flattened { if exists(re) { continue } fmt.Fprintf(w, "// %s (%s)\n", re, re.Op) fmt.Fprintf(w, "l%d := func(s string, p int) int {\n", reid(re)) if re.Flags&syntax.NonGreedy != 0 { panic("non-greedy match not supported: " + re.String()) } switch re.Op { case syntax.OpNoMatch: // matches no strings fmt.Fprintf(w, "return p\n") case syntax.OpEmptyMatch: // matches empty string fmt.Fprintf(w, "if len(s) == 0 { return p }\n") fmt.Fprintf(w, "return -1\n") case syntax.OpLiteral: // matches Runes sequence n := utf8.RuneCountInString(string(re.Rune)) if re.Flags&syntax.FoldCase != 0 { if n == 1 && !unicode.IsLetter(re.Rune[0]) { fmt.Fprintf(w, "if p < len(s) && s[p] == %q { return p+1 }\n", re.Rune[0]) } else { fmt.Fprintf(w, "if p+%d <= len(s) && strings.EqualFold(s[p:p+%d], %q) { return p+%d }\n", n, n, string(re.Rune), n) } } else { if n == 1 { fmt.Fprintf(w, "if p < len(s) && s[p] == %q { return p+1 }\n", re.Rune[0]) } else { fmt.Fprintf(w, "if p+%d <= len(s) && s[p:p+%d] == %q { return p+%d }\n", n, n, string(re.Rune), n) } } fmt.Fprintf(w, "return -1\n") case syntax.OpCharClass: // matches Runes interpreted as range pair list fmt.Fprintf(w, "if len(s) <= p { return -1 }\n") needDecode := false asciiSet := true for i := 0; i < len(re.Rune); i += 2 { l, r := re.Rune[i], re.Rune[i+1] ln, rn := utf8.RuneLen(l), utf8.RuneLen(r) if ln != 1 || rn != 1 { needDecode = true } if l > 0x7f || r > 0x7f || l != r { asciiSet = false } } if needDecode { fmt.Fprintf(w, "var (rn rune; n int)\n") decodeRune(w, "p", "rn", "n") } else { fmt.Fprintf(w, "rn := s[p]\n") } if asciiSet { if len(re.Rune) == 2 { fmt.Fprintf(w, "if rn == %q { return p+1 }\n", re.Rune[0]) } else if len(re.Rune) == 4 { fmt.Fprintf(w, "if rn == %q || rn == %q { return p+1 }\n", re.Rune[0], re.Rune[2]) } else { fmt.Fprintf(w, "switch rn {\n") fmt.Fprintf(w, "case ") for i := 0; i < len(re.Rune); i += 2 { if i != 0 { fmt.Fprintf(w, ",") } fmt.Fprintf(w, "%q", re.Rune[i]) } fmt.Fprintf(w, ": return p+1\n") fmt.Fprintf(w, "}\n") } } else { fmt.Fprintf(w, "switch {\n") for i := 0; i < len(re.Rune); i += 2 { l, r := re.Rune[i], re.Rune[i+1] ln, rn := utf8.RuneLen(l), utf8.RuneLen(r) if ln == 1 && rn == 1 { if l == r { fmt.Fprintf(w, "case rn == %q: return p+1\n", l) } else { fmt.Fprintf(w, "case rn >= %q && rn <= %q: return p+1\n", l, r) } } else { if l == r { fmt.Fprintf(w, "case rn == %q: return p+n\n", l) } else { fmt.Fprintf(w, "case rn >= %q && rn <= %q: return p+n\n", l, r) } } } fmt.Fprintf(w, "}\n") } fmt.Fprintf(w, "return -1\n") case syntax.OpAnyCharNotNL: // matches any character except newline fmt.Fprintf(w, "var (rn rune; n int)\n") decodeRune(w, "p", "rn", "n") fmt.Fprintf(w, "if len(s) <= p+n || rn == '\\n' { return -1 }\n") fmt.Fprintf(w, "return p+n\n") case syntax.OpAnyChar: // matches any character fmt.Fprintf(w, "var n int\n") fmt.Fprintf(w, "if s[p] < utf8.RuneSelf {\n") fmt.Fprintf(w, " n = 1\n") fmt.Fprintf(w, "} else {\n") fmt.Fprintf(w, " 
_, n = utf8.DecodeRuneInString(s[p:])\n") fmt.Fprintf(w, "}\n") fmt.Fprintf(w, "if len(s) <= p+n { return -1 }\n") fmt.Fprintf(w, "return p+n\n") case syntax.OpWordBoundary, syntax.OpNoWordBoundary, syntax.OpBeginText, syntax.OpEndText, syntax.OpBeginLine, syntax.OpEndLine: fmt.Fprintf(w, "var l, u rune = -1, -1\n") fmt.Fprintf(w, "if p == 0 {\n") fmt.Fprintf(w, " if p < len(s) {\n") decodeRune(w, "0", "u", "_") fmt.Fprintf(w, " }\n") fmt.Fprintf(w, "} else if p == len(s) {\n") fmt.Fprintf(w, " l, _ = utf8.DecodeLastRuneInString(s)\n") fmt.Fprintf(w, "} else {\n") fmt.Fprintf(w, " l, _ = utf8.DecodeLastRuneInString(s[0:p])\n") decodeRune(w, "p", "u", "_") fmt.Fprintf(w, "}\n") fmt.Fprintf(w, "op := syntax.EmptyOpContext(l, u)\n") lut := map[syntax.Op]string{ syntax.OpWordBoundary: "EmptyWordBoundary", syntax.OpNoWordBoundary: "EmptyNoWordBoundary", syntax.OpBeginText: "EmptyBeginText", syntax.OpEndText: "EmptyEndText", syntax.OpBeginLine: "EmptyBeginLine", syntax.OpEndLine: "EmptyEndLine", } fmt.Fprintf(w, "if op & syntax.%s != 0 { return p }\n", lut[re.Op]) fmt.Fprintf(w, "return -1\n") case syntax.OpCapture: // capturing subexpression with index Cap, optional name Name fmt.Fprintf(w, "np := l%d(s, p)\n", reid(re.Sub0[0])) fmt.Fprintf(w, "if np != -1 {\n") fmt.Fprintf(w, " groups[%d] = p\n", re.Cap*2) fmt.Fprintf(w, " groups[%d] = np\n", re.Cap*2+1) fmt.Fprintf(w, "}\n") fmt.Fprintf(w, "return np") case syntax.OpStar: // matches Sub[0] zero or more times fmt.Fprintf(w, "for len(s) > p {\n") fmt.Fprintf(w, "if np := l%d(s, p); np == -1 { return p } else { p = np }\n", reid(re.Sub0[0])) fmt.Fprintf(w, "}\n") fmt.Fprintf(w, "return p\n") case syntax.OpPlus: // matches Sub[0] one or more times fmt.Fprintf(w, "if p = l%d(s, p); p == -1 { return -1 }\n", reid(re.Sub0[0])) fmt.Fprintf(w, "for len(s) > p {\n") fmt.Fprintf(w, "if np := l%d(s, p); np == -1 { return p } else { p = np }\n", reid(re.Sub0[0])) fmt.Fprintf(w, "}\n") fmt.Fprintf(w, "return p\n") case syntax.OpQuest: // matches Sub[0] zero or one times fmt.Fprintf(w, "if np := l%d(s, p); np != -1 { return np }\n", reid(re.Sub0[0])) fmt.Fprintf(w, "return p\n") case syntax.OpRepeat: // matches Sub[0] at least Min times, at most Max (Max == -1 is no limit) panic("??") case syntax.OpConcat: // matches concatenation of Subs for _, sub := range re.Sub { fmt.Fprintf(w, "if p = l%d(s, p); p == -1 { return -1 }\n", reid(sub)) } fmt.Fprintf(w, "return p\n") case syntax.OpAlternate: // matches alternation of Subs for _, sub := range re.Sub { fmt.Fprintf(w, "if np := l%d(s, p); np != -1 { return np }\n", reid(sub)) } fmt.Fprintf(w, "return -1\n") } fmt.Fprintf(w, "}\n") } fmt.Fprintf(w, "np := l%d(s, p)\n", reid(re)) fmt.Fprintf(w, "if np == -1 {\n") fmt.Fprintf(w, " return\n") fmt.Fprintf(w, "}\n") fmt.Fprintf(w, "groups[0] = p\n") fmt.Fprintf(w, "groups[1] = np\n") fmt.Fprintf(w, "return\n") fmt.Fprintf(w, "}\n") return nil } // This exists because of https://github.com/golang/go/issues/31666 func decodeRune(w io.Writer, offset string, rn string, n string) { fmt.Fprintf(w, "if s[%s] < utf8.RuneSelf {\n", offset) fmt.Fprintf(w, " %s, %s = rune(s[%s]), 1\n", rn, n, offset) fmt.Fprintf(w, "} else {\n") fmt.Fprintf(w, " %s, %s = utf8.DecodeRuneInString(s[%s:])\n", rn, n, offset) fmt.Fprintf(w, "}\n") } func flatten(re *syntax.Regexp) (out []*syntax.Regexp) { for _, sub := range re.Sub { out = append(out, flatten(sub)...) 
} out = append(out, re) return } func isSimpleRuneRange(runes []rune) bool { for i := 0; i < len(runes); i += 2 { if runes[i] != runes[i+1] || utf8.RuneLen(runes[i]) != 1 { return false } } return true } golang-github-alecthomas-participle-v2-2.1.4/cmd/participle/go.mod000066400000000000000000000003251505300366400250510ustar00rootroot00000000000000module github.com/alecthomas/participle/v2/cmd/participle go 1.18 require ( github.com/alecthomas/kong v1.6.1 github.com/alecthomas/participle/v2 v2.1.1 ) replace github.com/alecthomas/participle/v2 => ../.. golang-github-alecthomas-participle-v2-2.1.4/cmd/participle/go.sum000066400000000000000000000044601505300366400251020ustar00rootroot00000000000000github.com/alecthomas/assert/v2 v2.1.0 h1:tbredtNcQnoSd3QBhQWI7QZ3XHOVkw1Moklp2ojoH/0= github.com/alecthomas/kong v0.6.1 h1:1kNhcFepkR+HmasQpbiKDLylIL8yh5B5y1zPp5bJimA= github.com/alecthomas/kong v0.6.1/go.mod h1:JfHWDzLmbh/puW6I3V7uWenoh56YNVONW+w8eKeUr9I= github.com/alecthomas/kong v0.7.1 h1:azoTh0IOfwlAX3qN9sHWTxACE2oV8Bg2gAwBsMwDQY4= github.com/alecthomas/kong v0.7.1/go.mod h1:n1iCIO2xS46oE8ZfYCNDqdR0b0wZNrXAIAqro/2132U= github.com/alecthomas/kong v0.8.0 h1:ryDCzutfIqJPnNn0omnrgHLbAggDQM2VWHikE1xqK7s= github.com/alecthomas/kong v0.8.0/go.mod h1:n1iCIO2xS46oE8ZfYCNDqdR0b0wZNrXAIAqro/2132U= github.com/alecthomas/kong v0.8.1 h1:acZdn3m4lLRobeh3Zi2S2EpnXTd1mOL6U7xVml+vfkY= github.com/alecthomas/kong v0.8.1/go.mod h1:n1iCIO2xS46oE8ZfYCNDqdR0b0wZNrXAIAqro/2132U= github.com/alecthomas/kong v1.2.1 h1:E8jH4Tsgv6wCRX2nGrdPyHDUCSG83WH2qE4XLACD33Q= github.com/alecthomas/kong v1.2.1/go.mod h1:rKTSFhbdp3Ryefn8x5MOEprnRFQ7nlmMC01GKhehhBM= github.com/alecthomas/kong v1.6.1 h1:/7bVimARU3uxPD0hbryPE8qWrS3Oz3kPQoxA/H2NKG8= github.com/alecthomas/kong v1.6.1/go.mod h1:p2vqieVMeTAnaC83txKtXe8FLke2X07aruPWXyMPQrU= github.com/alecthomas/repr v0.0.0-20210801044451-80ca428c5142/go.mod h1:2kn6fqh/zIyPLmm3ugklbEi5hg5wS435eygvNfaDQL8= github.com/alecthomas/repr v0.1.0 h1:ENn2e1+J3k09gyj2shc0dHr/yjaWSHRlrJ4DPMevDqE= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= golang-github-alecthomas-participle-v2-2.1.4/cmd/participle/main.go000066400000000000000000000006711505300366400252220ustar00rootroot00000000000000package main import "github.com/alecthomas/kong" var ( version string = "dev" cli struct { Version kong.VersionFlag Gen struct { Lexer genLexerCmd `cmd:"" help:"Generate a lexer."` } `cmd:"" help:"Generate code to accelerate Participle."` } ) func main() { kctx := kong.Parse(&cli, kong.Description(`A command-line tool for Participle.`), 
kong.Vars{"version": version}, ) err := kctx.Run() kctx.FatalIfErrorf(err) } golang-github-alecthomas-participle-v2-2.1.4/cmd/railroad/000077500000000000000000000000001505300366400234045ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/cmd/railroad/assets/000077500000000000000000000000001505300366400247065ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/cmd/railroad/assets/railroad-diagrams.css000066400000000000000000000017471505300366400310130ustar00rootroot00000000000000svg.railroad-diagram { background-color: hsl(30,20%,95%); } svg.railroad-diagram path { stroke-width: 3; stroke: black; fill: rgba(0,0,0,0); } svg.railroad-diagram text { font: bold 14px monospace; text-anchor: middle; white-space: pre; } svg.railroad-diagram text.diagram-text { font-size: 12px; } svg.railroad-diagram text.diagram-arrow { font-size: 16px; } svg.railroad-diagram text.label { text-anchor: start; } svg.railroad-diagram text.comment { font: italic 12px monospace; } svg.railroad-diagram g.non-terminal text { /*font-style: italic;*/ } svg.railroad-diagram rect { stroke-width: 3; stroke: black; fill: hsl(120,100%,90%); } svg.railroad-diagram rect.group-box { stroke: gray; stroke-dasharray: 10 5; fill: none; } svg.railroad-diagram path.diagram-text { stroke-width: 3; stroke: black; fill: white; cursor: help; } svg.railroad-diagram g.diagram-text:hover path.diagram-text { fill: #eee; } golang-github-alecthomas-participle-v2-2.1.4/cmd/railroad/assets/railroad-diagrams.js000066400000000000000000001261631505300366400306370ustar00rootroot00000000000000"use strict"; /* Railroad Diagrams by Tab Atkins Jr. (and others) http://xanthir.com http://twitter.com/tabatkins http://github.com/tabatkins/railroad-diagrams This document and all associated files in the github project are licensed under CC0: http://creativecommons.org/publicdomain/zero/1.0/ This means you can reuse, remix, or otherwise appropriate this project for your own use WITHOUT RESTRICTION. (The actual legal meaning can be found at the above link.) Don't ask me for permission to use any part of this project, JUST USE IT. I would appreciate attribution, but that is not required by the license. */ /* This file uses a module pattern to avoid leaking names into the global scope. Should be compatible with AMD, CommonJS, and plain ol' browser JS. As well, several configuration constants are passed into the module function at the bottom of this file. At runtime, these constants can be found on the Diagram class, and can be changed before creating a Diagram. */ (function(options) { var funcs = {}; function subclassOf(baseClass, superClass) { baseClass.prototype = Object.create(superClass.prototype); baseClass.prototype.$super = superClass.prototype; } function unnull(/* children */) { return [].slice.call(arguments).reduce(function(sofar, x) { return sofar !== undefined ? sofar : x; }); } function determineGaps(outer, inner) { var diff = outer - inner; switch(Diagram.INTERNAL_ALIGNMENT) { case 'left': return [0, diff]; break; case 'right': return [diff, 0]; break; case 'center': default: return [diff/2, diff/2]; break; } } function wrapString(value) { return value instanceof FakeSVG ? 
value : new Terminal(""+value); } function sum(iter, func) { if(!func) func = function(x) { return x; }; return iter.map(func).reduce(function(a,b){return a+b}, 0); } function max(iter, func) { if(!func) func = function(x) { return x; }; return Math.max.apply(null, iter.map(func)); } function* enumerate(iter) { var count = 0; for(const x of iter) { yield [count, x]; count++; } } var SVG = funcs.SVG = function SVG(name, attrs, text) { attrs = attrs || {}; text = text || ''; var el = document.createElementNS("http://www.w3.org/2000/svg",name); for(var attr in attrs) { if(attr === 'xlink:href') el.setAttributeNS("http://www.w3.org/1999/xlink", 'href', attrs[attr]); else el.setAttribute(attr, attrs[attr]); } el.textContent = text; return el; } var FakeSVG = funcs.FakeSVG = function FakeSVG(tagName, attrs, text){ if(!(this instanceof FakeSVG)) return new FakeSVG(tagName, attrs, text); if(text) this.children = text; else this.children = []; this.tagName = tagName; this.attrs = unnull(attrs, {}); return this; }; FakeSVG.prototype.format = function(x, y, width) { // Virtual }; FakeSVG.prototype.addTo = function(parent) { if(parent instanceof FakeSVG) { parent.children.push(this); return this; } else { var svg = this.toSVG(); parent.appendChild(svg); return svg; } }; FakeSVG.prototype.escapeString = function(string) { // Escape markdown and HTML special characters return string.replace(/[*_\`\[\]<&]/g, function(charString) { return '&#' + charString.charCodeAt(0) + ';'; }); }; FakeSVG.prototype.toSVG = function() { var el = SVG(this.tagName, this.attrs); if(typeof this.children == 'string') { el.textContent = this.children; } else { this.children.forEach(function(e) { el.appendChild(e.toSVG()); }); } return el; }; FakeSVG.prototype.toString = function() { var str = '<' + this.tagName; var group = this.tagName == "g" || this.tagName == "svg"; for(var attr in this.attrs) { str += ' ' + attr + '="' + (this.attrs[attr]+'').replace(/&/g, '&amp;').replace(/"/g, '&quot;') + '"'; } str += '>'; if(group) str += "\n"; if(typeof this.children == 'string') { str += FakeSVG.prototype.escapeString(this.children); } else { this.children.forEach(function(e) { str += e; }); } str += '</' + this.tagName + '>\n'; return str; } FakeSVG.prototype.walk = function(cb) { cb(this); } var Path = funcs.Path = function Path(x,y) { if(!(this instanceof Path)) return new Path(x,y); FakeSVG.call(this, 'path'); this.attrs.d = "M"+x+' '+y; } subclassOf(Path, FakeSVG); Path.prototype.m = function(x,y) { this.attrs.d += 'm'+x+' '+y; return this; } Path.prototype.h = function(val) { this.attrs.d += 'h'+val; return this; } Path.prototype.right = function(val) { return this.h(Math.max(0, val)); } Path.prototype.left = function(val) { return this.h(-Math.max(0, val)); } Path.prototype.v = function(val) { this.attrs.d += 'v'+val; return this; } Path.prototype.down = function(val) { return this.v(Math.max(0, val)); } Path.prototype.up = function(val) { return this.v(-Math.max(0, val)); } Path.prototype.arc = function(sweep){ // 1/4 of a circle var x = Diagram.ARC_RADIUS; var y = Diagram.ARC_RADIUS; if(sweep[0] == 'e' || sweep[1] == 'w') { x *= -1; } if(sweep[0] == 's' || sweep[1] == 'n') { y *= -1; } if(sweep == 'ne' || sweep == 'es' || sweep == 'sw' || sweep == 'wn') { var cw = 1; } else { var cw = 0; } this.attrs.d += "a"+Diagram.ARC_RADIUS+" "+Diagram.ARC_RADIUS+" 0 0 "+cw+' '+x+' '+y; return this; } Path.prototype.arc_8 = function(start, dir) { // 1/8 of a circle const arc = Diagram.ARC_RADIUS; const s2 = 1/Math.sqrt(2) * arc; const s2inv = (arc - s2); let path = "a 
" + arc + " " + arc + " 0 0 " + (dir=='cw' ? "1" : "0") + " "; const sd = start+dir; const offset = sd == 'ncw' ? [s2, s2inv] : sd == 'necw' ? [s2inv, s2] : sd == 'ecw' ? [-s2inv, s2] : sd == 'secw' ? [-s2, s2inv] : sd == 'scw' ? [-s2, -s2inv] : sd == 'swcw' ? [-s2inv, -s2] : sd == 'wcw' ? [s2inv, -s2] : sd == 'nwcw' ? [s2, -s2inv] : sd == 'nccw' ? [-s2, s2inv] : sd == 'nwccw' ? [-s2inv, s2] : sd == 'wccw' ? [s2inv, s2] : sd == 'swccw' ? [s2, s2inv] : sd == 'sccw' ? [s2, -s2inv] : sd == 'seccw' ? [s2inv, -s2] : sd == 'eccw' ? [-s2inv, -s2] : sd == 'neccw' ? [-s2, -s2inv] : null ; path += offset.join(" "); this.attrs.d += path; return this; } Path.prototype.l = function(x, y) { this.attrs.d += 'l'+x+' '+y; return this; } Path.prototype.format = function() { // All paths in this library start/end horizontally. // The extra .5 ensures a minor overlap, so there's no seams in bad rasterizers. this.attrs.d += 'h.5'; return this; } var DiagramMultiContainer = funcs.DiagramMultiContainer = function DiagramMultiContainer(tagName, items, attrs, text) { FakeSVG.call(this, tagName, attrs, text); this.items = items.map(wrapString); } subclassOf(DiagramMultiContainer, FakeSVG); DiagramMultiContainer.prototype.walk = function(cb) { cb(this); this.items.forEach(x=>w.walk(cb)); } var Diagram = funcs.Diagram = function Diagram(items) { if(!(this instanceof Diagram)) return new Diagram([].slice.call(arguments)); DiagramMultiContainer.call(this, 'svg', items, {class: Diagram.DIAGRAM_CLASS}); if(!(this.items[0] instanceof Start)) { this.items.unshift(new Start()); } if(!(this.items[this.items.length-1] instanceof End)) { this.items.push(new End()); } this.up = this.down = this.height = this.width = 0; for(var i = 0; i < this.items.length; i++) { var item = this.items[i]; this.width += item.width + (item.needsSpace?20:0); this.up = Math.max(this.up, item.up - this.height); this.height += item.height; this.down = Math.max(this.down - item.height, item.down); } this.formatted = false; } subclassOf(Diagram, DiagramMultiContainer); for(var option in options) { Diagram[option] = options[option]; } Diagram.prototype.format = function(paddingt, paddingr, paddingb, paddingl) { paddingt = unnull(paddingt, 20); paddingr = unnull(paddingr, paddingt, 20); paddingb = unnull(paddingb, paddingt, 20); paddingl = unnull(paddingl, paddingr, 20); var x = paddingl; var y = paddingt; y += this.up; var g = FakeSVG('g', Diagram.STROKE_ODD_PIXEL_LENGTH ? 
{transform:'translate(.5 .5)'} : {}); for(var i = 0; i < this.items.length; i++) { var item = this.items[i]; if(item.needsSpace) { Path(x,y).h(10).addTo(g); x += 10; } item.format(x, y, item.width).addTo(g); x += item.width; y += item.height; if(item.needsSpace) { Path(x,y).h(10).addTo(g); x += 10; } } this.attrs.width = this.width + paddingl + paddingr; this.attrs.height = this.up + this.height + this.down + paddingt + paddingb; this.attrs.viewBox = "0 0 " + this.attrs.width + " " + this.attrs.height; g.addTo(this); this.formatted = true; return this; } Diagram.prototype.addTo = function(parent) { if(!parent) { var scriptTag = document.getElementsByTagName('script'); scriptTag = scriptTag[scriptTag.length - 1]; parent = scriptTag.parentNode; } return this.$super.addTo.call(this, parent); } Diagram.prototype.toSVG = function() { if (!this.formatted) { this.format(); } return this.$super.toSVG.call(this); } Diagram.prototype.toString = function() { if (!this.formatted) { this.format(); } return this.$super.toString.call(this); } Diagram.DEBUG = false; var ComplexDiagram = funcs.ComplexDiagram = function ComplexDiagram() { var diagram = new Diagram([].slice.call(arguments)); var items = diagram.items; items.shift(); items.pop(); items.unshift(new Start({type:"complex"})); items.push(new End({type:"complex"})); diagram.items = items; return diagram; } var Sequence = funcs.Sequence = function Sequence(items) { if(!(this instanceof Sequence)) return new Sequence([].slice.call(arguments)); DiagramMultiContainer.call(this, 'g', items); var numberOfItems = this.items.length; this.needsSpace = true; this.up = this.down = this.height = this.width = 0; for(var i = 0; i < this.items.length; i++) { var item = this.items[i]; this.width += item.width + (item.needsSpace?20:0); this.up = Math.max(this.up, item.up - this.height); this.height += item.height; this.down = Math.max(this.down - item.height, item.down); } if(this.items[0].needsSpace) this.width -= 10; if(this.items[this.items.length-1].needsSpace) this.width -= 10; if(Diagram.DEBUG) { this.attrs['data-updown'] = this.up + " " + this.height + " " + this.down this.attrs['data-type'] = "sequence" } } subclassOf(Sequence, DiagramMultiContainer); Sequence.prototype.format = function(x,y,width) { // Hook up the two sides if this is narrower than its stated width. 
var gaps = determineGaps(width, this.width); Path(x,y).h(gaps[0]).addTo(this); Path(x+gaps[0]+this.width,y+this.height).h(gaps[1]).addTo(this); x += gaps[0]; for(var i = 0; i < this.items.length; i++) { var item = this.items[i]; if(item.needsSpace && i > 0) { Path(x,y).h(10).addTo(this); x += 10; } item.format(x, y, item.width).addTo(this); x += item.width; y += item.height; if(item.needsSpace && i < this.items.length-1) { Path(x,y).h(10).addTo(this); x += 10; } } return this; } var Stack = funcs.Stack = function Stack(items) { if(!(this instanceof Stack)) return new Stack([].slice.call(arguments)); DiagramMultiContainer.call(this, 'g', items); if( items.length === 0 ) { throw new RangeError("Stack() must have at least one child."); } this.width = Math.max.apply(null, this.items.map(function(e) { return e.width + (e.needsSpace?20:0); })); //if(this.items[0].needsSpace) this.width -= 10; //if(this.items[this.items.length-1].needsSpace) this.width -= 10; if(this.items.length > 1){ this.width += Diagram.ARC_RADIUS*2; } this.needsSpace = true; this.up = this.items[0].up; this.down = this.items[this.items.length-1].down; this.height = 0; var last = this.items.length - 1; for(var i = 0; i < this.items.length; i++) { var item = this.items[i]; this.height += item.height; if(i > 0) { this.height += Math.max(Diagram.ARC_RADIUS*2, item.up + Diagram.VERTICAL_SEPARATION); } if(i < last) { this.height += Math.max(Diagram.ARC_RADIUS*2, item.down + Diagram.VERTICAL_SEPARATION); } } if(Diagram.DEBUG) { this.attrs['data-updown'] = this.up + " " + this.height + " " + this.down this.attrs['data-type'] = "stack" } } subclassOf(Stack, DiagramMultiContainer); Stack.prototype.format = function(x,y,width) { var gaps = determineGaps(width, this.width); Path(x,y).h(gaps[0]).addTo(this); x += gaps[0]; var xInitial = x; if(this.items.length > 1) { Path(x, y).h(Diagram.ARC_RADIUS).addTo(this); x += Diagram.ARC_RADIUS; } for(var i = 0; i < this.items.length; i++) { var item = this.items[i]; var innerWidth = this.width - (this.items.length>1 ? 
Diagram.ARC_RADIUS*2 : 0); item.format(x, y, innerWidth).addTo(this); x += innerWidth; y += item.height; if(i !== this.items.length-1) { Path(x, y) .arc('ne').down(Math.max(0, item.down + Diagram.VERTICAL_SEPARATION - Diagram.ARC_RADIUS*2)) .arc('es').left(innerWidth) .arc('nw').down(Math.max(0, this.items[i+1].up + Diagram.VERTICAL_SEPARATION - Diagram.ARC_RADIUS*2)) .arc('ws').addTo(this); y += Math.max(item.down + Diagram.VERTICAL_SEPARATION, Diagram.ARC_RADIUS*2) + Math.max(this.items[i+1].up + Diagram.VERTICAL_SEPARATION, Diagram.ARC_RADIUS*2); //y += Math.max(Diagram.ARC_RADIUS*4, item.down + Diagram.VERTICAL_SEPARATION*2 + this.items[i+1].up) x = xInitial+Diagram.ARC_RADIUS; } } if(this.items.length > 1) { Path(x,y).h(Diagram.ARC_RADIUS).addTo(this); x += Diagram.ARC_RADIUS; } Path(x,y).h(gaps[1]).addTo(this); return this; } var OptionalSequence = funcs.OptionalSequence = function OptionalSequence(items) { if(!(this instanceof OptionalSequence)) return new OptionalSequence([].slice.call(arguments)); DiagramMultiContainer.call(this, 'g', items); if( items.length === 0 ) { throw new RangeError("OptionalSequence() must have at least one child."); } if( items.length === 1 ) { return new Sequence(items); } var arc = Diagram.ARC_RADIUS; this.needsSpace = false; this.width = 0; this.up = 0; this.height = sum(this.items, function(x){return x.height}); this.down = this.items[0].down; var heightSoFar = 0; for(var i = 0; i < this.items.length; i++) { var item = this.items[i]; this.up = Math.max(this.up, Math.max(arc*2, item.up + Diagram.VERTICAL_SEPARATION) - heightSoFar); heightSoFar += item.height; if(i > 0) { this.down = Math.max(this.height + this.down, heightSoFar + Math.max(arc*2, item.down + Diagram.VERTICAL_SEPARATION)) - this.height; } var itemWidth = (item.needsSpace?10:0) + item.width; if(i == 0) { this.width += arc + Math.max(itemWidth, arc); } else { this.width += arc*2 + Math.max(itemWidth, arc) + arc; } } if(Diagram.DEBUG) { this.attrs['data-updown'] = this.up + " " + this.height + " " + this.down this.attrs['data-type'] = "optseq" } } subclassOf(OptionalSequence, DiagramMultiContainer); OptionalSequence.prototype.format = function(x, y, width) { var arc = Diagram.ARC_RADIUS; var gaps = determineGaps(width, this.width); Path(x, y).right(gaps[0]).addTo(this); Path(x + gaps[0] + this.width, y + this.height).right(gaps[1]).addTo(this); x += gaps[0] var upperLineY = y - this.up; var last = this.items.length - 1; for(var i = 0; i < this.items.length; i++) { var item = this.items[i]; var itemSpace = (item.needsSpace?10:0); var itemWidth = item.width + itemSpace; if(i == 0) { // Upper skip Path(x,y) .arc('se') .up(y - upperLineY - arc*2) .arc('wn') .right(itemWidth - arc) .arc('ne') .down(y + item.height - upperLineY - arc*2) .arc('ws') .addTo(this); // Straight line Path(x, y) .right(itemSpace + arc) .addTo(this); item.format(x + itemSpace + arc, y, item.width).addTo(this); x += itemWidth + arc; y += item.height; // x ends on the far side of the first element, // where the next element's skip needs to begin } else if(i < last) { // Upper skip Path(x, upperLineY) .right(arc*2 + Math.max(itemWidth, arc) + arc) .arc('ne') .down(y - upperLineY + item.height - arc*2) .arc('ws') .addTo(this); // Straight line Path(x,y) .right(arc*2) .addTo(this); item.format(x + arc*2, y, item.width).addTo(this); Path(x + item.width + arc*2, y + item.height) .right(itemSpace + arc) .addTo(this); // Lower skip Path(x,y) .arc('ne') .down(item.height + Math.max(item.down + Diagram.VERTICAL_SEPARATION, arc*2) - 
arc*2) .arc('ws') .right(itemWidth - arc) .arc('se') .up(item.down + Diagram.VERTICAL_SEPARATION - arc*2) .arc('wn') .addTo(this); x += arc*2 + Math.max(itemWidth, arc) + arc; y += item.height; } else { // Straight line Path(x, y) .right(arc*2) .addTo(this); item.format(x + arc*2, y, item.width).addTo(this); Path(x + arc*2 + item.width, y + item.height) .right(itemSpace + arc) .addTo(this); // Lower skip Path(x,y) .arc('ne') .down(item.height + Math.max(item.down + Diagram.VERTICAL_SEPARATION, arc*2) - arc*2) .arc('ws') .right(itemWidth - arc) .arc('se') .up(item.down + Diagram.VERTICAL_SEPARATION - arc*2) .arc('wn') .addTo(this); } } return this; } var AlternatingSequence = funcs.AlternatingSequence = function AlternatingSequence(items) { if(!(this instanceof AlternatingSequence)) return new AlternatingSequence([].slice.call(arguments)); DiagramMultiContainer.call(this, 'g', items); if( items.length === 1 ) { return new Sequence(items); } if( items.length !== 2 ) { throw new RangeError("AlternatingSequence() must have one or two children."); } this.needsSpace = false; const arc = Diagram.ARC_RADIUS; const vert = Diagram.VERTICAL_SEPARATION; const max = Math.max; const first = this.items[0]; const second = this.items[1]; const arcX = 1 / Math.sqrt(2) * arc * 2; const arcY = (1 - 1 / Math.sqrt(2)) * arc * 2; const crossY = Math.max(arc, Diagram.VERTICAL_SEPARATION); const crossX = (crossY - arcY) + arcX; const firstOut = max(arc + arc, crossY/2 + arc + arc, crossY/2 + vert + first.down); this.up = firstOut + first.height + first.up; const secondIn = max(arc + arc, crossY/2 + arc + arc, crossY/2 + vert + second.up); this.down = secondIn + second.height + second.down; this.height = 0; const firstWidth = 2*(first.needsSpace?10:0) + first.width; const secondWidth = 2*(second.needsSpace?10:0) + second.width; this.width = 2*arc + max(firstWidth, crossX, secondWidth) + 2*arc; if(Diagram.DEBUG) { this.attrs['data-updown'] = this.up + " " + this.height + " " + this.down this.attrs['data-type'] = "altseq" } } subclassOf(AlternatingSequence, DiagramMultiContainer); AlternatingSequence.prototype.format = function(x, y, width) { const arc = Diagram.ARC_RADIUS; const gaps = determineGaps(width, this.width); Path(x,y).right(gaps[0]).addTo(this); console.log(gaps); x += gaps[0]; Path(x+this.width, y).right(gaps[1]).addTo(this); // bounding box //Path(x+gaps[0], y).up(this.up).right(this.width).down(this.up+this.down).left(this.width).up(this.down).addTo(this); const first = this.items[0]; const second = this.items[1]; // top const firstIn = this.up - first.up; const firstOut = this.up - first.up - first.height; Path(x,y).arc('se').up(firstIn-2*arc).arc('wn').addTo(this); first.format(x + 2*arc, y - firstIn, this.width - 4*arc).addTo(this); Path(x + this.width - 2*arc, y - firstOut).arc('ne').down(firstOut - 2*arc).arc('ws').addTo(this); // bottom const secondIn = this.down - second.down - second.height; const secondOut = this.down - second.down; Path(x,y).arc('ne').down(secondIn - 2*arc).arc('ws').addTo(this); second.format(x + 2*arc, y + secondIn, this.width - 4*arc).addTo(this); Path(x + this.width - 2*arc, y + secondOut).arc('se').up(secondOut - 2*arc).arc('wn').addTo(this); // crossover const arcX = 1 / Math.sqrt(2) * arc * 2; const arcY = (1 - 1 / Math.sqrt(2)) * arc * 2; const crossY = Math.max(arc, Diagram.VERTICAL_SEPARATION); const crossX = (crossY - arcY) + arcX; const crossBar = (this.width - 4*arc - crossX)/2; Path(x+arc, y - crossY/2 - arc).arc('ws').right(crossBar) .arc_8('n', 'cw').l(crossX 
- arcX, crossY - arcY).arc_8('sw', 'ccw') .right(crossBar).arc('ne').addTo(this); Path(x+arc, y + crossY/2 + arc).arc('wn').right(crossBar) .arc_8('s', 'ccw').l(crossX - arcX, -(crossY - arcY)).arc_8('nw', 'cw') .right(crossBar).arc('se').addTo(this); return this; } var Choice = funcs.Choice = function Choice(normal, items) { if(!(this instanceof Choice)) return new Choice(normal, [].slice.call(arguments,1)); DiagramMultiContainer.call(this, 'g', items); if( typeof normal !== "number" || normal !== Math.floor(normal) ) { throw new TypeError("The first argument of Choice() must be an integer."); } else if(normal < 0 || normal >= items.length) { throw new RangeError("The first argument of Choice() must be an index for one of the items."); } else { this.normal = normal; } var first = 0; var last = items.length - 1; this.width = Math.max.apply(null, this.items.map(function(el){return el.width})) + Diagram.ARC_RADIUS*4; this.height = this.items[normal].height; this.up = this.items[first].up; for(var i = first; i < normal; i++) { if(i == normal-1) var arcs = Diagram.ARC_RADIUS*2; else var arcs = Diagram.ARC_RADIUS; this.up += Math.max(arcs, this.items[i].height + this.items[i].down + Diagram.VERTICAL_SEPARATION + this.items[i+1].up); } this.down = this.items[last].down; for(var i = normal+1; i <= last; i++) { if(i == normal+1) var arcs = Diagram.ARC_RADIUS*2; else var arcs = Diagram.ARC_RADIUS; this.down += Math.max(arcs, this.items[i-1].height + this.items[i-1].down + Diagram.VERTICAL_SEPARATION + this.items[i].up); } this.down -= this.items[normal].height; // already counted in Choice.height if(Diagram.DEBUG) { this.attrs['data-updown'] = this.up + " " + this.height + " " + this.down this.attrs['data-type'] = "choice" } } subclassOf(Choice, DiagramMultiContainer); Choice.prototype.format = function(x,y,width) { // Hook up the two sides if this is narrower than its stated width. var gaps = determineGaps(width, this.width); Path(x,y).h(gaps[0]).addTo(this); Path(x+gaps[0]+this.width,y+this.height).h(gaps[1]).addTo(this); x += gaps[0]; var last = this.items.length -1; var innerWidth = this.width - Diagram.ARC_RADIUS*4; // Do the elements that curve above for(var i = this.normal - 1; i >= 0; i--) { var item = this.items[i]; if( i == this.normal - 1 ) { var distanceFromY = Math.max(Diagram.ARC_RADIUS*2, this.items[this.normal].up + Diagram.VERTICAL_SEPARATION + item.down + item.height); } Path(x,y) .arc('se') .up(distanceFromY - Diagram.ARC_RADIUS*2) .arc('wn').addTo(this); item.format(x+Diagram.ARC_RADIUS*2,y - distanceFromY,innerWidth).addTo(this); Path(x+Diagram.ARC_RADIUS*2+innerWidth, y-distanceFromY+item.height) .arc('ne') .down(distanceFromY - item.height + this.height - Diagram.ARC_RADIUS*2) .arc('ws').addTo(this); distanceFromY += Math.max(Diagram.ARC_RADIUS, item.up + Diagram.VERTICAL_SEPARATION + (i == 0 ? 0 : this.items[i-1].down+this.items[i-1].height)); } // Do the straight-line path. 
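// The chosen "normal" item runs along the entry line itself: items before it
// were routed above in the loop over i < this.normal, and items after it are
// routed below, so only this branch needs no vertical detour.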
Path(x,y).right(Diagram.ARC_RADIUS*2).addTo(this); this.items[this.normal].format(x+Diagram.ARC_RADIUS*2, y, innerWidth).addTo(this); Path(x+Diagram.ARC_RADIUS*2+innerWidth, y+this.height).right(Diagram.ARC_RADIUS*2).addTo(this); // Do the elements that curve below for(var i = this.normal+1; i <= last; i++) { var item = this.items[i]; if( i == this.normal + 1 ) { var distanceFromY = Math.max(Diagram.ARC_RADIUS*2, this.height + this.items[this.normal].down + Diagram.VERTICAL_SEPARATION + item.up); } Path(x,y) .arc('ne') .down(distanceFromY - Diagram.ARC_RADIUS*2) .arc('ws').addTo(this); item.format(x+Diagram.ARC_RADIUS*2, y+distanceFromY, innerWidth).addTo(this); Path(x+Diagram.ARC_RADIUS*2+innerWidth, y+distanceFromY+item.height) .arc('se') .up(distanceFromY - Diagram.ARC_RADIUS*2 + item.height - this.height) .arc('wn').addTo(this); distanceFromY += Math.max(Diagram.ARC_RADIUS, item.height + item.down + Diagram.VERTICAL_SEPARATION + (i == last ? 0 : this.items[i+1].up)); } return this; } var HorizontalChoice = funcs.HorizontalChoice = function HorizontalChoice(items) { if(!(this instanceof HorizontalChoice)) return new HorizontalChoice([].slice.call(arguments)); if( items.length === 0 ) { throw new RangeError("HorizontalChoice() must have at least one child."); } if( items.length === 1) { return new Sequence(items); } DiagramMultiContainer.call(this, 'g', items); const allButLast = this.items.slice(0, -1); const middles = this.items.slice(1, -1); const first = this.items[0]; const last = this.items[this.items.length - 1]; this.needsSpace = false; this.width = Diagram.ARC_RADIUS; // starting track this.width += Diagram.ARC_RADIUS*2 * (this.items.length-1); // inbetween tracks this.width += sum(this.items, x=>x.width + (x.needsSpace?20:0)); // items this.width += (last.height > 0 ? Diagram.ARC_RADIUS : 0); // needs space to curve up this.width += Diagram.ARC_RADIUS; //ending track // Always exits at entrance height this.height = 0; // All but the last have a track running above them this._upperTrack = Math.max( Diagram.ARC_RADIUS*2, Diagram.VERTICAL_SEPARATION, max(allButLast, x=>x.up) + Diagram.VERTICAL_SEPARATION ); this.up = Math.max(this._upperTrack, last.up); // All but the first have a track running below them // Last either straight-lines or curves up, so has different calculation this._lowerTrack = Math.max( Diagram.VERTICAL_SEPARATION, max(middles, x=>x.height+Math.max(x.down+Diagram.VERTICAL_SEPARATION, Diagram.ARC_RADIUS*2)), last.height + last.down + Diagram.VERTICAL_SEPARATION ); if(first.height < this._lowerTrack) { // Make sure there's at least 2*AR room between first exit and lower track this._lowerTrack = Math.max(this._lowerTrack, first.height + Diagram.ARC_RADIUS*2); } this.down = Math.max(this._lowerTrack, first.height + first.down); if(Diagram.DEBUG) { this.attrs['data-updown'] = this.up + " " + this.height + " " + this.down this.attrs['data-type'] = "horizontalchoice" } } subclassOf(HorizontalChoice, DiagramMultiContainer); HorizontalChoice.prototype.format = function(x,y,width) { // Hook up the two sides if this is narrower than its stated width. 
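// Routing model: one upper track fans the entry line out over every item
// except the last, and one lower track gathers every exit except the first
// back up to entry height (this._upperTrack and this._lowerTrack were sized
// in the constructor above).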
var gaps = determineGaps(width, this.width); new Path(x,y).h(gaps[0]).addTo(this); new Path(x+gaps[0]+this.width,y+this.height).h(gaps[1]).addTo(this); x += gaps[0]; const first = this.items[0]; const last = this.items[this.items.length-1]; const allButFirst = this.items.slice(1); const allButLast = this.items.slice(0, -1); // upper track var upperSpan = (sum(allButLast, x=>x.width+(x.needsSpace?20:0)) + (this.items.length - 2) * Diagram.ARC_RADIUS*2 - Diagram.ARC_RADIUS ); new Path(x,y) .arc('se') .v(-(this._upperTrack - Diagram.ARC_RADIUS*2)) .arc('wn') .h(upperSpan) .addTo(this); // lower track var lowerSpan = (sum(allButFirst, x=>x.width+(x.needsSpace?20:0)) + (this.items.length - 2) * Diagram.ARC_RADIUS*2 + (last.height > 0 ? Diagram.ARC_RADIUS : 0) - Diagram.ARC_RADIUS ); var lowerStart = x + Diagram.ARC_RADIUS + first.width+(first.needsSpace?20:0) + Diagram.ARC_RADIUS*2; new Path(lowerStart, y+this._lowerTrack) .h(lowerSpan) .arc('se') .v(-(this._lowerTrack - Diagram.ARC_RADIUS*2)) .arc('wn') .addTo(this); // Items for(const [i, item] of enumerate(this.items)) { // input track if(i === 0) { new Path(x,y) .h(Diagram.ARC_RADIUS) .addTo(this); x += Diagram.ARC_RADIUS; } else { new Path(x, y - this._upperTrack) .arc('ne') .v(this._upperTrack - Diagram.ARC_RADIUS*2) .arc('ws') .addTo(this); x += Diagram.ARC_RADIUS*2; } // item var itemWidth = item.width + (item.needsSpace?20:0); item.format(x, y, itemWidth).addTo(this); x += itemWidth; // output track if(i === this.items.length-1) { if(item.height === 0) { new Path(x,y) .h(Diagram.ARC_RADIUS) .addTo(this); } else { new Path(x,y+item.height) .arc('se') .addTo(this); } } else if(i === 0 && item.height > this._lowerTrack) { // Needs to arc up to meet the lower track, not down. if(item.height - this._lowerTrack >= Diagram.ARC_RADIUS*2) { new Path(x, y+item.height) .arc('se') .v(this._lowerTrack - item.height + Diagram.ARC_RADIUS*2) .arc('wn') .addTo(this); } else { // Not enough space to fit two arcs // so just bail and draw a straight line for now. 
new Path(x, y+item.height) .l(Diagram.ARC_RADIUS*2, this._lowerTrack - item.height) .addTo(this); } } else { new Path(x, y+item.height) .arc('ne') .v(this._lowerTrack - item.height - Diagram.ARC_RADIUS*2) .arc('ws') .addTo(this); } } return this; } var MultipleChoice = funcs.MultipleChoice = function MultipleChoice(normal, type, items) { if(!(this instanceof MultipleChoice)) return new MultipleChoice(normal, type, [].slice.call(arguments,2)); DiagramMultiContainer.call(this, 'g', items); if( typeof normal !== "number" || normal !== Math.floor(normal) ) { throw new TypeError("The first argument of MultipleChoice() must be an integer."); } else if(normal < 0 || normal >= items.length) { throw new RangeError("The first argument of MultipleChoice() must be an index for one of the items."); } else { this.normal = normal; } if( type != "any" && type != "all" ) { throw new SyntaxError("The second argument of MultipleChoice must be 'any' or 'all'."); } else { this.type = type; } this.needsSpace = true; this.innerWidth = max(this.items, function(x){return x.width}); this.width = 30 + Diagram.ARC_RADIUS + this.innerWidth + Diagram.ARC_RADIUS + 20; this.up = this.items[0].up; this.down = this.items[this.items.length-1].down; this.height = this.items[normal].height; for(var i = 0; i < this.items.length; i++) { var item = this.items[i]; if(i == normal - 1 || i == normal + 1) var minimum = 10 + Diagram.ARC_RADIUS; else var minimum = Diagram.ARC_RADIUS; if(i < normal) { this.up += Math.max(minimum, item.height + item.down + Diagram.VERTICAL_SEPARATION + this.items[i+1].up); } else if(i > normal) { this.down += Math.max(minimum, item.up + Diagram.VERTICAL_SEPARATION + this.items[i-1].down + this.items[i-1].height); } } this.down -= this.items[normal].height; // already counted in this.height if(Diagram.DEBUG) { this.attrs['data-updown'] = this.up + " " + this.height + " " + this.down this.attrs['data-type'] = "multiplechoice" } } subclassOf(MultipleChoice, DiagramMultiContainer); MultipleChoice.prototype.format = function(x, y, width) { var gaps = determineGaps(width, this.width); Path(x, y).right(gaps[0]).addTo(this); Path(x + gaps[0] + this.width, y + this.height).right(gaps[1]).addTo(this); x += gaps[0]; var normal = this.items[this.normal]; // Do the elements that curve above for(var i = this.normal - 1; i >= 0; i--) { var item = this.items[i]; if( i == this.normal - 1 ) { var distanceFromY = Math.max(10 + Diagram.ARC_RADIUS, normal.up + Diagram.VERTICAL_SEPARATION + item.down + item.height); } Path(x + 30,y) .up(distanceFromY - Diagram.ARC_RADIUS) .arc('wn').addTo(this); item.format(x + 30 + Diagram.ARC_RADIUS, y - distanceFromY, this.innerWidth).addTo(this); Path(x + 30 + Diagram.ARC_RADIUS + this.innerWidth, y - distanceFromY + item.height) .arc('ne') .down(distanceFromY - item.height + this.height - Diagram.ARC_RADIUS - 10) .addTo(this); if(i != 0) { distanceFromY += Math.max(Diagram.ARC_RADIUS, item.up + Diagram.VERTICAL_SEPARATION + this.items[i-1].down + this.items[i-1].height); } } Path(x + 30, y).right(Diagram.ARC_RADIUS).addTo(this); normal.format(x + 30 + Diagram.ARC_RADIUS, y, this.innerWidth).addTo(this); Path(x + 30 + Diagram.ARC_RADIUS + this.innerWidth, y + this.height).right(Diagram.ARC_RADIUS).addTo(this); for(var i = this.normal+1; i < this.items.length; i++) { var item = this.items[i]; if(i == this.normal + 1) { var distanceFromY = Math.max(10+Diagram.ARC_RADIUS, normal.height + normal.down + Diagram.VERTICAL_SEPARATION + item.up); } Path(x + 30, y) .down(distanceFromY - 
Diagram.ARC_RADIUS) .arc('ws') .addTo(this); item.format(x + 30 + Diagram.ARC_RADIUS, y + distanceFromY, this.innerWidth).addTo(this) Path(x + 30 + Diagram.ARC_RADIUS + this.innerWidth, y + distanceFromY + item.height) .arc('se') .up(distanceFromY - Diagram.ARC_RADIUS + item.height - normal.height) .addTo(this); if(i != this.items.length - 1) { distanceFromY += Math.max(Diagram.ARC_RADIUS, item.height + item.down + Diagram.VERTICAL_SEPARATION + this.items[i+1].up); } } var text = FakeSVG('g', {"class": "diagram-text"}).addTo(this) FakeSVG('title', {}, (this.type=="any"?"take one or more branches, once each, in any order":"take all branches, once each, in any order")).addTo(text) FakeSVG('path', { "d": "M "+(x+30)+" "+(y-10)+" h -26 a 4 4 0 0 0 -4 4 v 12 a 4 4 0 0 0 4 4 h 26 z", "class": "diagram-text" }).addTo(text) FakeSVG('text', { "x": x + 15, "y": y + 4, "class": "diagram-text" }, (this.type=="any"?"1+":"all")).addTo(text) FakeSVG('path', { "d": "M "+(x+this.width-20)+" "+(y-10)+" h 16 a 4 4 0 0 1 4 4 v 12 a 4 4 0 0 1 -4 4 h -16 z", "class": "diagram-text" }).addTo(text) FakeSVG('path', { "d": "M "+(x+this.width-13)+" "+(y-2)+" a 4 4 0 1 0 6 -1 m 2.75 -1 h -4 v 4 m 0 -3 h 2", "style": "stroke-width: 1.75" }).addTo(text) return this; }; var Optional = funcs.Optional = function Optional(item, skip) { if( skip === undefined ) return Choice(1, Skip(), item); else if ( skip === "skip" ) return Choice(0, Skip(), item); else throw "Unknown value for Optional()'s 'skip' argument."; } var OneOrMore = funcs.OneOrMore = function OneOrMore(item, rep) { if(!(this instanceof OneOrMore)) return new OneOrMore(item, rep); FakeSVG.call(this, 'g'); rep = rep || (new Skip); this.item = wrapString(item); this.rep = wrapString(rep); this.width = Math.max(this.item.width, this.rep.width) + Diagram.ARC_RADIUS*2; this.height = this.item.height; this.up = this.item.up; this.down = Math.max(Diagram.ARC_RADIUS*2, this.item.down + Diagram.VERTICAL_SEPARATION + this.rep.up + this.rep.height + this.rep.down); if(Diagram.DEBUG) { this.attrs['data-updown'] = this.up + " " + this.height + " " + this.down this.attrs['data-type'] = "oneormore" } } subclassOf(OneOrMore, FakeSVG); OneOrMore.prototype.needsSpace = true; OneOrMore.prototype.format = function(x,y,width) { // Hook up the two sides if this is narrower than its stated width. 
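// The item sits on the main line and the repeat separator (rep) loops back
// underneath it; distanceFromY below keeps the loop clear of both the item's
// descent and the repeat's ascent.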
var gaps = determineGaps(width, this.width); Path(x,y).h(gaps[0]).addTo(this); Path(x+gaps[0]+this.width,y+this.height).h(gaps[1]).addTo(this); x += gaps[0]; // Draw item Path(x,y).right(Diagram.ARC_RADIUS).addTo(this); this.item.format(x+Diagram.ARC_RADIUS,y,this.width-Diagram.ARC_RADIUS*2).addTo(this); Path(x+this.width-Diagram.ARC_RADIUS,y+this.height).right(Diagram.ARC_RADIUS).addTo(this); // Draw repeat arc var distanceFromY = Math.max(Diagram.ARC_RADIUS*2, this.item.height+this.item.down+Diagram.VERTICAL_SEPARATION+this.rep.up); Path(x+Diagram.ARC_RADIUS,y).arc('nw').down(distanceFromY-Diagram.ARC_RADIUS*2).arc('ws').addTo(this); this.rep.format(x+Diagram.ARC_RADIUS, y+distanceFromY, this.width - Diagram.ARC_RADIUS*2).addTo(this); Path(x+this.width-Diagram.ARC_RADIUS, y+distanceFromY+this.rep.height).arc('se').up(distanceFromY-Diagram.ARC_RADIUS*2+this.rep.height-this.item.height).arc('en').addTo(this); return this; } OneOrMore.prototype.walk = function(cb) { cb(this); this.item.walk(cb); this.rep.walk(cb); } var ZeroOrMore = funcs.ZeroOrMore = function ZeroOrMore(item, rep, skip) { return Optional(OneOrMore(item, rep), skip); } var Start = funcs.Start = function Start({type="simple", label}={}) { if(!(this instanceof Start)) return new Start({type, label}); FakeSVG.call(this, 'g'); this.width = 20; this.height = 0; this.up = 10; this.down = 10; this.type = type; if(label != undefined) { this.label = ""+label; this.width = Math.max(20, this.label.length * Diagram.CHAR_WIDTH + 10); } if(Diagram.DEBUG) { this.attrs['data-updown'] = this.up + " " + this.height + " " + this.down this.attrs['data-type'] = "start" } } subclassOf(Start, FakeSVG); Start.prototype.format = function(x,y) { let path = new Path(x, y-10); if (this.type === "complex") { path.down(20) .m(0, -10) .right(this.width) .addTo(this); } else { path.down(20) .m(10, -20) .down(20) .m(-10, -10) .right(this.width) .addTo(this); } if(this.label) { new FakeSVG('text', {x:x, y:y-15, style:"text-anchor:start"}, this.label).addTo(this); } return this; } var End = funcs.End = function End({type="simple"}={}) { if(!(this instanceof End)) return new End({type}); FakeSVG.call(this, 'path'); this.width = 20; this.height = 0; this.up = 10; this.down = 10; this.type = type; if(Diagram.DEBUG) { this.attrs['data-updown'] = this.up + " " + this.height + " " + this.down this.attrs['data-type'] = "end" } } subclassOf(End, FakeSVG); End.prototype.format = function(x,y) { if (this.type === "complex") { this.attrs.d = 'M '+x+' '+y+' h 20 m 0 -10 v 20'; } else { this.attrs.d = 'M '+x+' '+y+' h 20 m -10 -10 v 20 m 10 -20 v 20'; } return this; } var Terminal = funcs.Terminal = function Terminal(text, {href, title}={}) { if(!(this instanceof Terminal)) return new Terminal(text, {href, title}); FakeSVG.call(this, 'g', {'class': 'terminal'}); this.text = ""+text; this.href = href; this.title = title; this.width = this.text.length * Diagram.CHAR_WIDTH + 20; this.height = 0; this.up = 11; this.down = 11; if(Diagram.DEBUG) { this.attrs['data-updown'] = this.up + " " + this.height + " " + this.down this.attrs['data-type'] = "terminal" } } subclassOf(Terminal, FakeSVG); Terminal.prototype.needsSpace = true; Terminal.prototype.format = function(x, y, width) { // Hook up the two sides if this is narrower than its stated width. 
var gaps = determineGaps(width, this.width); Path(x,y).h(gaps[0]).addTo(this); Path(x+gaps[0]+this.width,y).h(gaps[1]).addTo(this); x += gaps[0]; FakeSVG('rect', {x:x, y:y-11, width:this.width, height:this.up+this.down, rx:10, ry:10}).addTo(this); var text = FakeSVG('text', {x:x+this.width/2, y:y+4}, this.text); if(this.href) FakeSVG('a', {'xlink:href': this.href}, [text]).addTo(this); else text.addTo(this); if(this.title) new FakeSVG('title', {}, this.title).addTo(this); return this; } var NonTerminal = funcs.NonTerminal = function NonTerminal(text, {href, title}={}) { if(!(this instanceof NonTerminal)) return new NonTerminal(text, {href, title}); FakeSVG.call(this, 'g', {'class': 'non-terminal'}); this.text = ""+text; this.href = href; this.title = title; this.width = this.text.length * Diagram.CHAR_WIDTH + 20; this.height = 0; this.up = 11; this.down = 11; if(Diagram.DEBUG) { this.attrs['data-updown'] = this.up + " " + this.height + " " + this.down this.attrs['data-type'] = "nonterminal" } } subclassOf(NonTerminal, FakeSVG); NonTerminal.prototype.needsSpace = true; NonTerminal.prototype.format = function(x, y, width) { // Hook up the two sides if this is narrower than its stated width. var gaps = determineGaps(width, this.width); Path(x,y).h(gaps[0]).addTo(this); Path(x+gaps[0]+this.width,y).h(gaps[1]).addTo(this); x += gaps[0]; FakeSVG('rect', {x:x, y:y-11, width:this.width, height:this.up+this.down}).addTo(this); var text = FakeSVG('text', {x:x+this.width/2, y:y+4}, this.text); if(this.href) FakeSVG('a', {'xlink:href': this.href}, [text]).addTo(this); else text.addTo(this); if(this.title) new FakeSVG('title', {}, this.title).addTo(this); return this; } var Comment = funcs.Comment = function Comment(text, {href, title}={}) { if(!(this instanceof Comment)) return new Comment(text, {href, title}); FakeSVG.call(this, 'g'); this.text = ""+text; this.href = href; this.title = title; this.width = this.text.length * Diagram.COMMENT_CHAR_WIDTH + 10; this.height = 0; this.up = 11; this.down = 11; if(Diagram.DEBUG) { this.attrs['data-updown'] = this.up + " " + this.height + " " + this.down this.attrs['data-type'] = "comment" } } subclassOf(Comment, FakeSVG); Comment.prototype.needsSpace = true; Comment.prototype.format = function(x, y, width) { // Hook up the two sides if this is narrower than its stated width. 
var gaps = determineGaps(width, this.width); Path(x,y).h(gaps[0]).addTo(this); Path(x+gaps[0]+this.width,y+this.height).h(gaps[1]).addTo(this); x += gaps[0]; var text = FakeSVG('text', {x:x+this.width/2, y:y+5, class:'comment'}, this.text); if(this.href) FakeSVG('a', {'xlink:href': this.href}, [text]).addTo(this); else text.addTo(this); if(this.title) new FakeSVG('title', {}, this.title).addTo(this); return this; } var Skip = funcs.Skip = function Skip() { if(!(this instanceof Skip)) return new Skip(); FakeSVG.call(this, 'g'); this.width = 0; this.height = 0; this.up = 0; this.down = 0; if(Diagram.DEBUG) { this.attrs['data-updown'] = this.up + " " + this.height + " " + this.down this.attrs['data-type'] = "skip" } } subclassOf(Skip, FakeSVG); Skip.prototype.format = function(x, y, width) { Path(x,y).right(width).addTo(this); return this; } var Block = funcs.Block = function Block({width=50, up=15, height=25, down=15, needsSpace=true}={}) { if(!(this instanceof Block)) return new Block({width, up, height, down, needsSpace}); FakeSVG.call(this, 'g'); this.width = width; this.height = height; this.up = up; this.down = down; this.needsSpace = true; if(Diagram.DEBUG) { this.attrs['data-updown'] = this.up + " " + this.height + " " + this.down; this.attrs['data-type'] = "block" } } subclassOf(Block, FakeSVG); Block.prototype.format = function(x, y, width) { // Hook up the two sides if this is narrower than its stated width. var gaps = determineGaps(width, this.width); new Path(x,y).h(gaps[0]).addTo(this); new Path(x+gaps[0]+this.width,y).h(gaps[1]).addTo(this); x += gaps[0]; new FakeSVG('rect', {x:x, y:y-this.up, width:this.width, height:this.up+this.height+this.down}).addTo(this); return this; } var root; if (typeof define === 'function' && define.amd) { // AMD. Register as an anonymous module. root = {}; define([], function() { return root; }); } else if (typeof exports === 'object') { // CommonJS for node root = exports; } else { // Browser globals (root is window) root = this; } for(var name in funcs) { root[name] = funcs[name]; } }).call(this, { VERTICAL_SEPARATION: 8, ARC_RADIUS: 10, DIAGRAM_CLASS: 'railroad-diagram', STROKE_ODD_PIXEL_LENGTH: true, INTERNAL_ALIGNMENT: 'center', CHAR_WIDTH: 8.5, // width of each monospace character. play until you find the right value for your font COMMENT_CHAR_WIDTH: 7, // comments are in smaller text by default } ); golang-github-alecthomas-participle-v2-2.1.4/cmd/railroad/main.go000066400000000000000000000123131505300366400246570ustar00rootroot00000000000000// Package main generates Railroad Diagrams from Participle grammar EBNF. package main import ( "embed" "flag" "fmt" "os" "github.com/alecthomas/repr" "github.com/alecthomas/participle/v2/ebnf" ) const ( mergeRefThreshold = -1 mergeSizeThreshold = 0 ) type production struct { *ebnf.Production refs int size int } // Embed the railroad-diagrams css and js files for later output. // From here: https://github.com/tabatkins/railroad-diagrams // //go:embed assets/* var assets embed.FS func generate(productions map[string]*production, n ebnf.Node) (s string) { switch n := n.(type) { case *ebnf.EBNF: s += ` ` for _, p := range n.Productions { s += generate(productions, p) + "\n" } s += "\n" case *ebnf.Production: if productions[n.Production].refs <= mergeRefThreshold { break } s += `

<h1>` + n.Production + "</h1>" + `
<script>
ComplexDiagram(` + generate(productions, n.Expression) + `).addTo();
</script>
` + "
\n" s += "\n" case *ebnf.Expression: s += "Choice(0, " for i, a := range n.Alternatives { if i > 0 { s += ", " } s += generate(productions, a) } s += ")" case *ebnf.SubExpression: s += generate(productions, n.Expr) if n.Lookahead != ebnf.LookaheadAssertionNone { s = fmt.Sprintf(`Group(%s, "?%c")`, s, n.Lookahead) } case *ebnf.Sequence: s += "Sequence(" for i, t := range n.Terms { if i > 0 { s += ", " } s += generate(productions, t) } s += ")" case *ebnf.Term: switch n.Repetition { case "*": s += "ZeroOrMore(" case "+": s += "OneOrMore(" case "?": s += "Optional(" } switch { case n.Name != "": p := productions[n.Name] if p.refs > mergeRefThreshold { s += fmt.Sprintf("NonTerminal(%q, {href:\"#%s\"})", n.Name, n.Name) } else { s += generate(productions, p.Expression) } case n.Group != nil: s += generate(productions, n.Group) case n.Literal != "": s += fmt.Sprintf("Terminal(%s)", n.Literal) case n.Token != "": s += fmt.Sprintf("NonTerminal(%q)", n.Token) default: panic(repr.String(n)) } if n.Repetition != "" { s += ")" } if n.Negation { s = fmt.Sprintf(`Group(%s, "~")`, s) } default: panic(repr.String(n)) } return } func countProductions(productions map[string]*production, n ebnf.Node) (size int) { switch n := n.(type) { case *ebnf.EBNF: for _, p := range n.Productions { productions[p.Production] = &production{Production: p} } for _, p := range n.Productions { countProductions(productions, p) } for _, p := range n.Productions { if productions[p.Production].size <= mergeSizeThreshold { productions[p.Production].refs = mergeRefThreshold } } case *ebnf.Production: productions[n.Production].size = countProductions(productions, n.Expression) case *ebnf.Expression: for _, a := range n.Alternatives { size += countProductions(productions, a) } case *ebnf.SubExpression: size += countProductions(productions, n.Expr) case *ebnf.Sequence: for _, t := range n.Terms { size += countProductions(productions, t) } case *ebnf.Term: if n.Name != "" { productions[n.Name].refs++ size++ } else if n.Group != nil { size += countProductions(productions, n.Group) } else { size++ } default: panic(repr.String(n)) } return } func main() { fmt.Fprintln(os.Stderr, "Generates railroad diagrams from a Participle EBNF grammar on stdin.") fmt.Fprintln(os.Stderr, " (EBNF is available from .String() on your parser)") fmt.Fprintln(os.Stderr, " (Use control-D to end input)") help := flag.Bool("h", false, "output help and quit") writeAssets := flag.Bool("w", false, "write css and js files") outputFile := flag.String("o", "", "file to write html to") flag.Parse() if *help { flag.PrintDefaults() os.Exit(0) } ast, err := ebnf.Parse(os.Stdin) if err != nil { panic(err) } productions := map[string]*production{} countProductions(productions, ast) str := generate(productions, ast) if *outputFile != "" { err := os.WriteFile(*outputFile, []byte(str), 0644) // nolint if err != nil { panic(err) } if *writeAssets { err := writeAssetFiles() if err != nil { panic(err) } } else { fmt.Fprintln(os.Stderr, ">>> Copy railroad-diagrams.{css,js} from https://github.com/tabatkins/railroad-diagrams") } fmt.Fprintf(os.Stderr, ">>> File written: %s\n", *outputFile) } else { fmt.Println(str) fmt.Fprintln(os.Stderr, ">>> Copy railroad-diagrams.{css,js} from https://github.com/tabatkins/railroad-diagrams") } } func writeAssetFiles() (err error) { files, err := assets.ReadDir("assets") if err != nil { return } for _, f := range files { fileName := f.Name() data, err := assets.ReadFile(fmt.Sprintf("assets/%s", fileName)) if err != nil { return err } err = 
os.WriteFile(fileName, data, 0644) // nolint if err != nil { return err } fmt.Fprintf(os.Stderr, ">>> File written: %s\n", fileName) } return } golang-github-alecthomas-participle-v2-2.1.4/context.go000066400000000000000000000067701505300366400230710ustar00rootroot00000000000000package participle import ( "fmt" "io" "reflect" "strings" "github.com/alecthomas/participle/v2/lexer" ) type contextFieldSet struct { tokens []lexer.Token strct reflect.Value field structLexerField fieldValue []reflect.Value } // Context for a single parse. type parseContext struct { lexer.PeekingLexer depth int trace io.Writer deepestError error deepestErrorDepth int lookahead int caseInsensitive map[lexer.TokenType]bool apply []*contextFieldSet allowTrailing bool } func newParseContext(lex *lexer.PeekingLexer, lookahead int, caseInsensitive map[lexer.TokenType]bool) parseContext { return parseContext{ PeekingLexer: *lex, caseInsensitive: caseInsensitive, lookahead: lookahead, } } func (p *parseContext) DeepestError(err error) error { if p.PeekingLexer.Cursor() >= p.deepestErrorDepth { return err } if p.deepestError != nil { return p.deepestError } return err } // Defer adds a function to be applied once a branch has been picked. func (p *parseContext) Defer(tokens []lexer.Token, strct reflect.Value, field structLexerField, fieldValue []reflect.Value) { p.apply = append(p.apply, &contextFieldSet{tokens, strct, field, fieldValue}) } // Apply deferred functions. func (p *parseContext) Apply() error { for _, apply := range p.apply { if err := setField(apply.tokens, apply.strct, apply.field, apply.fieldValue); err != nil { return err } } p.apply = nil return nil } // Branch accepts the branch as the correct branch. func (p *parseContext) Accept(branch *parseContext) { p.apply = append(p.apply, branch.apply...) p.PeekingLexer = branch.PeekingLexer if branch.deepestErrorDepth >= p.deepestErrorDepth { p.deepestErrorDepth = branch.deepestErrorDepth p.deepestError = branch.deepestError } } // Branch starts a new lookahead branch. func (p *parseContext) Branch() *parseContext { branch := &parseContext{} *branch = *p branch.apply = nil return branch } func (p *parseContext) MaybeUpdateError(err error) { if p.PeekingLexer.Cursor() >= p.deepestErrorDepth { p.deepestError = err p.deepestErrorDepth = p.PeekingLexer.Cursor() } } // Stop returns true if parsing should terminate after the given "branch" failed to match. // // Additionally, track the deepest error in the branch - the deeper the error, the more useful it usually is. // It could already be the deepest error in the branch (only if deeper than current parent context deepest), // or it could be "err", the latest error on the branch (even if same depth; the lexer holds the position). 
func (p *parseContext) Stop(err error, branch *parseContext) bool { if branch.deepestErrorDepth > p.deepestErrorDepth { p.deepestError = branch.deepestError p.deepestErrorDepth = branch.deepestErrorDepth } else if branch.PeekingLexer.Cursor() >= p.deepestErrorDepth { p.deepestError = err p.deepestErrorDepth = maxInt(branch.PeekingLexer.Cursor(), branch.deepestErrorDepth) } if !p.hasInfiniteLookahead() && branch.PeekingLexer.Cursor() > p.PeekingLexer.Cursor()+p.lookahead { p.Accept(branch) return true } return false } func (p *parseContext) hasInfiniteLookahead() bool { return p.lookahead < 0 } func (p *parseContext) printTrace(n node) func() { if p.trace != nil { tok := p.PeekingLexer.Peek() fmt.Fprintf(p.trace, "%s%q %s\n", strings.Repeat(" ", p.depth*2), tok, n.GoString()) p.depth += 1 return func() { p.depth -= 1 } } return func() {} } func maxInt(a, b int) int { if a > b { return a } return b } golang-github-alecthomas-participle-v2-2.1.4/doc.go000066400000000000000000000041241505300366400221410ustar00rootroot00000000000000// Package participle constructs parsers from definitions in struct tags and parses directly into // those structs. The approach is philosophically similar to how other marshallers work in Go, // "unmarshalling" an instance of a grammar into a struct. // // The supported annotation syntax is: // // - `@` Capture expression into the field. // - `@@` Recursively capture using the fields own type. // - `` Match named lexer token. // - `( ... )` Group. // - `"..."` Match the literal (note that the lexer must emit tokens matching this literal exactly). // - `"...":` Match the literal, specifying the exact lexer token type to match. // - ` ...` Match expressions. // - ` | ` Match one of the alternatives. // // The following modifiers can be used after any expression: // // - `*` Expression can match zero or more times. // - `+` Expression must match one or more times. // - `?` Expression can match zero or once. // - `!` Require a non-empty match (this is useful with a sequence of optional matches eg. `("a"? "b"? "c"?)!`). // // Here's an example of an EBNF grammar. // // type Group struct { // Expression *Expression `"(" @@ ")"` // } // // type Option struct { // Expression *Expression `"[" @@ "]"` // } // // type Repetition struct { // Expression *Expression `"{" @@ "}"` // } // // type Literal struct { // Start string `@String` // lexer.Lexer token "String" // End string `("…" @String)?` // } // // type Term struct { // Name string ` @Ident` // Literal *Literal `| @@` // Group *Group `| @@` // Option *Option `| @@` // Repetition *Expression `| "(" @@ ")"` // } // // type Sequence struct { // Terms []*Term `@@+` // } // // type Expression struct { // Alternatives []*Sequence `@@ ("|" @@)*` // } // // type Expressions []*Expression // // type Production struct { // Name string `@Ident "="` // Expressions Expressions `@@+ "."` // } // // type EBNF struct { // Productions []*Production `@@*` // } package participle golang-github-alecthomas-participle-v2-2.1.4/ebnf.go000066400000000000000000000060241505300366400223070ustar00rootroot00000000000000package participle import ( "fmt" "strings" ) // String returns the EBNF for the grammar. // // Productions are always upper cased. Lexer tokens are always lower case. 
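// For example (an illustrative sketch - the Value grammar below is not part
// of this package), the EBNF for a parser can be printed with:
//
//    type Value struct {
//        Int *int    `  @Int`
//        Str *string `| @String`
//    }
//
//    parser := participle.MustBuild[Value]()
//    fmt.Println(parser.String())
//
// which would print something like:
//
//    Value = <int> | <string> .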
func (p *Parser[G]) String() string { return ebnf(p.typeNodes[p.rootType]) } type ebnfp struct { name string out string } func ebnf(n node) string { outp := []*ebnfp{} switch n.(type) { case *strct: buildEBNF(true, n, map[node]bool{}, nil, &outp) out := []string{} for _, p := range outp { out = append(out, fmt.Sprintf("%s = %s .", p.name, p.out)) } return strings.Join(out, "\n") default: out := &ebnfp{} buildEBNF(true, n, map[node]bool{}, out, &outp) return out.out } } func buildEBNF(root bool, n node, seen map[node]bool, p *ebnfp, outp *[]*ebnfp) { switch n := n.(type) { case *disjunction: if !root { p.out += "(" } for i, next := range n.nodes { if i > 0 { p.out += " | " } buildEBNF(false, next, seen, p, outp) } if !root { p.out += ")" } case *union: name := strings.ToUpper(n.typ.Name()[:1]) + n.typ.Name()[1:] if p != nil { p.out += name } if seen[n] { return } p = &ebnfp{name: name} *outp = append(*outp, p) seen[n] = true for i, next := range n.disjunction.nodes { if i > 0 { p.out += " | " } buildEBNF(false, next, seen, p, outp) } case *custom: name := strings.ToUpper(n.typ.Name()[:1]) + n.typ.Name()[1:] p.out += name case *strct: name := strings.ToUpper(n.typ.Name()[:1]) + n.typ.Name()[1:] if p != nil { p.out += name } if seen[n] { return } seen[n] = true p = &ebnfp{name: name} *outp = append(*outp, p) buildEBNF(true, n.expr, seen, p, outp) case *sequence: group := n.next != nil && !root if group { p.out += "(" } for n != nil { buildEBNF(false, n.node, seen, p, outp) n = n.next if n != nil { p.out += " " } } if group { p.out += ")" } case *parseable: p.out += n.t.Name() case *capture: buildEBNF(false, n.node, seen, p, outp) case *reference: p.out += "<" + strings.ToLower(n.identifier) + ">" case *negation: p.out += "~" buildEBNF(false, n.node, seen, p, outp) case *literal: p.out += fmt.Sprintf("%q", n.s) case *group: if child, ok := n.expr.(*group); ok && child.mode == groupMatchOnce { buildEBNF(false, child.expr, seen, p, outp) } else if child, ok := n.expr.(*capture); ok { if grandchild, ok := child.node.(*group); ok && grandchild.mode == groupMatchOnce { buildEBNF(false, grandchild.expr, seen, p, outp) } else { buildEBNF(false, n.expr, seen, p, outp) } } else { buildEBNF(false, n.expr, seen, p, outp) } switch n.mode { case groupMatchNonEmpty: p.out += "!" case groupMatchZeroOrOne: p.out += "?" case groupMatchZeroOrMore: p.out += "*" case groupMatchOneOrMore: p.out += "+" case groupMatchOnce: } case *lookaheadGroup: if !n.negative { p.out += "(?= " } else { p.out += "(?! " } buildEBNF(true, n.expr, seen, p, outp) p.out += ")" default: panic(fmt.Sprintf("unsupported node type %T", n)) } } golang-github-alecthomas-participle-v2-2.1.4/ebnf/000077500000000000000000000000001505300366400217565ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/ebnf/ebnf.go000066400000000000000000000067531505300366400232320ustar00rootroot00000000000000// Package ebnf contains the AST and parser for parsing the form of EBNF produced by Participle. // // The self-referential EBNF is: // // EBNF = Production* . // Production = "=" Expression "." . // Expression = Sequence ("|" Sequence)* . // SubExpression = "(" ("?!" | "?=")? Expression ")" . // Sequence = Term+ . // Term = "~"? ( | | ("<" ">") | SubExpression) ("*" | "+" | "?" | "!")? . package ebnf import ( "fmt" "io" "github.com/alecthomas/participle/v2" ) var parser = participle.MustBuild[EBNF]() // A Node in the EBNF grammar. type Node interface { sealed() } var _ Node = &Term{} // Term in the EBNF grammar. 
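// For example (a minimal sketch), the single term in `A = <ident>+ .` can be
// inspected after parsing:
//
//    ast, err := ParseString(`A = <ident>+ .`)
//    if err != nil {
//        panic(err)
//    }
//    term := ast.Productions[0].Expression.Alternatives[0].Terms[0]
//    // term.Token == "ident" && term.Repetition == "+"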
type Term struct { Negation bool `@("~")?` Name string `( @Ident` Literal string ` | @String` Token string ` | "<" @Ident ">"` Group *SubExpression ` | @@ )` Repetition string `@("*" | "+" | "?" | "!")?` } func (t *Term) sealed() {} func (t *Term) String() string { switch { case t.Name != "": return t.Name + t.Repetition case t.Literal != "": return t.Literal + t.Repetition case t.Token != "": return "<" + t.Token + ">" + t.Repetition case t.Group != nil: return t.Group.String() + t.Repetition default: panic("??") } } // LookaheadAssertion enum. type LookaheadAssertion rune func (l *LookaheadAssertion) sealed() {} func (l *LookaheadAssertion) Capture(tokens []string) error { // nolint rn := tokens[0][0] switch rn { case '!', '=': *l = LookaheadAssertion(rn) default: panic(rn) } return nil } // Lookahead assertion enums. const ( LookaheadAssertionNone LookaheadAssertion = 0 LookaheadAssertionNegative LookaheadAssertion = '!' LookaheadAssertionPositive LookaheadAssertion = '=' ) var _ Node = &SubExpression{} // SubExpression is an expression inside parentheses ( ... ) type SubExpression struct { Lookahead LookaheadAssertion `"(" ("?" @("!" | "="))?` Expr *Expression `@@ ")"` } func (s *SubExpression) sealed() {} func (s *SubExpression) String() string { out := "(" if s.Lookahead != LookaheadAssertionNone { out += "?" + string(s.Lookahead) } out += s.Expr.String() + ")" return out } var _ Node = &Sequence{} // A Sequence of terms. type Sequence struct { Terms []*Term `@@+` } func (s *Sequence) sealed() {} func (s *Sequence) String() (out string) { for i, term := range s.Terms { if i > 0 { out += " " } out += term.String() } return } var _ Node = &Expression{} // Expression is a set of alternatives separated by "|" in the EBNF. type Expression struct { Alternatives []*Sequence `@@ ( "|" @@ )*` } func (e *Expression) sealed() {} func (e *Expression) String() (out string) { for i, seq := range e.Alternatives { if i > 0 { out += " | " } out += seq.String() } return } var _ Node = &Production{} // Production of the grammar. type Production struct { Production string `@Ident "="` Expression *Expression `@@ "."` } func (p *Production) sealed() {} var _ Node = &EBNF{} // EBNF itself. type EBNF struct { Productions []*Production `@@*` } func (e *EBNF) sealed() {} func (e *EBNF) String() (out string) { for i, production := range e.Productions { out += fmt.Sprintf("%s = %s .", production.Production, production.Expression) if i < len(e.Productions)-1 { out += "\n" } } return } // ParseString string into EBNF. func ParseString(ebnf string) (*EBNF, error) { return parser.ParseString("", ebnf) } // Parse io.Reader into EBNF. func Parse(r io.Reader) (*EBNF, error) { return parser.Parse("", r) } golang-github-alecthomas-participle-v2-2.1.4/ebnf/ebnf_test.go000066400000000000000000000004001505300366400242500ustar00rootroot00000000000000package ebnf import ( "testing" require "github.com/alecthomas/assert/v2" ) func TestEBNF(t *testing.T) { input := parser.String() t.Log(input) ast, err := ParseString(input) require.NoError(t, err, input) require.Equal(t, input, ast.String()) } golang-github-alecthomas-participle-v2-2.1.4/ebnf_test.go000066400000000000000000000034721505300366400233520ustar00rootroot00000000000000package participle_test import ( "strings" "testing" require "github.com/alecthomas/assert/v2" "github.com/alecthomas/participle/v2" ) func TestEBNF(t *testing.T) { parser := mustTestParser[EBNF](t) expected := ` EBNF = Production* . Production = "=" Expression+ "." . 
Expression = Sequence ("|" Sequence)* . Sequence = Term+ . Term = <ident> | Literal | Range | Group | LookaheadGroup | EBNFOption | Repetition | Negation . Literal = <string> . Range = <string> "…" <string> . Group = "(" Expression ")" . LookaheadGroup = "(" "?" ("=" | "!") Expression ")" . EBNFOption = "[" Expression "]" . Repetition = "{" Expression "}" . Negation = "!" Expression . ` require.Equal(t, strings.TrimSpace(expected), parser.String()) } func TestEBNF_Other(t *testing.T) { type Grammar struct { PositiveLookahead string ` (?= 'good') @Ident` NegativeLookahead string `| (?! 'bad' | "worse") @Ident` Negation string `| !("anything" | 'but')` } parser := mustTestParser[Grammar](t) expected := `Grammar = ((?= "good") <ident>) | ((?! "bad" | "worse") <ident>) | ~("anything" | "but") .` require.Equal(t, expected, parser.String()) } type ( EBNFUnion interface{ ebnfUnion() } EBNFUnionA struct { A string `@Ident` } EBNFUnionB struct { B string `@String` } EBNFUnionC struct { C string `@Float` } ) func (EBNFUnionA) ebnfUnion() {} func (EBNFUnionB) ebnfUnion() {} func (EBNFUnionC) ebnfUnion() {} func TestEBNF_Union(t *testing.T) { type Grammar struct { TheUnion EBNFUnion `@@` } parser := mustTestParser[Grammar](t, participle.Union[EBNFUnion](EBNFUnionA{}, EBNFUnionB{}, EBNFUnionC{})) require.Equal(t, strings.TrimSpace(` Grammar = EBNFUnion . EBNFUnion = EBNFUnionA | EBNFUnionB | EBNFUnionC . EBNFUnionA = <ident> . EBNFUnionB = <string> . EBNFUnionC = <float> . `), parser.String()) } golang-github-alecthomas-participle-v2-2.1.4/error.go000066400000000000000000000060371505300366400225320ustar00rootroot00000000000000package participle import ( "fmt" "github.com/alecthomas/participle/v2/lexer" ) // Error represents an error while parsing. // // The format of an Error is in the form "[<filename>:][<line>:<column>:] <message>". // // The error will contain positional information if available. type Error interface { error // Unadorned message. Message() string // Closest position to error location. Position() lexer.Position } // FormatError formats an error in the form "[<filename>:][<line>:<column>:] <message>" func FormatError(err Error) string { msg := "" pos := err.Position() if pos.Filename != "" { msg += pos.Filename + ":" } if pos.Line != 0 || pos.Column != 0 { msg += fmt.Sprintf("%d:%d:", pos.Line, pos.Column) } if msg != "" { msg += " " + err.Message() } else { msg = err.Message() } return msg } // UnexpectedTokenError is returned by Parse when an unexpected token is encountered. // // This is useful for composing parsers in order to detect when a sub-parser has terminated. type UnexpectedTokenError struct { Unexpected lexer.Token Expect string expectNode node // Usable instead of Expect, delays creating the string representation until necessary } func (u *UnexpectedTokenError) Error() string { return FormatError(u) } func (u *UnexpectedTokenError) Message() string { // nolint: golint var expected string if u.expectNode != nil { expected = fmt.Sprintf(" (expected %s)", u.expectNode) } else if u.Expect != "" { expected = fmt.Sprintf(" (expected %s)", u.Expect) } return fmt.Sprintf("unexpected token %q%s", u.Unexpected, expected) } func (u *UnexpectedTokenError) Position() lexer.Position { return u.Unexpected.Pos } // nolint: golint // ParseError is returned when a parse error occurs. // // It is useful for differentiating between parse errors and other errors such // as lexing and IO errors. 
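// For example (a sketch - "parser" is assumed to have been built elsewhere
// with participle.MustBuild):
//
//    _, err := parser.ParseString("", "some input")
//    var perr participle.Error
//    if errors.As(err, &perr) {
//        fmt.Println(perr.Position(), perr.Message())
//    }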
type ParseError struct { Msg string Pos lexer.Position } func (p *ParseError) Error() string { return FormatError(p) } func (p *ParseError) Message() string { return p.Msg } func (p *ParseError) Position() lexer.Position { return p.Pos } // Errorf creates a new Error at the given position. func Errorf(pos lexer.Position, format string, args ...interface{}) Error { return &ParseError{Msg: fmt.Sprintf(format, args...), Pos: pos} } type wrappingParseError struct { err error ParseError } func (w *wrappingParseError) Unwrap() error { return w.err } // Wrapf attempts to wrap an existing error in a new message. // // If "err" is a participle.Error, its positional information will be used and // "pos" will be ignored. // // The returned error implements the Unwrap() method supported by the errors package. func Wrapf(pos lexer.Position, err error, format string, args ...interface{}) Error { var msg string if perr, ok := err.(Error); ok { pos = perr.Position() msg = fmt.Sprintf("%s: %s", fmt.Sprintf(format, args...), perr.Message()) } else { msg = fmt.Sprintf("%s: %s", fmt.Sprintf(format, args...), err.Error()) } return &wrappingParseError{err: err, ParseError: ParseError{Msg: msg, Pos: pos}} } golang-github-alecthomas-participle-v2-2.1.4/error_test.go000066400000000000000000000050301505300366400235610ustar00rootroot00000000000000package participle_test import ( "errors" "testing" require "github.com/alecthomas/assert/v2" "github.com/alecthomas/participle/v2" "github.com/alecthomas/participle/v2/lexer" ) func TestErrorReporting(t *testing.T) { type cls struct { Visibility string `@"public"?` Class string `"class" @Ident` Bases []string `('(' @Ident (',' @Ident)+ ')')?` } type union struct { Visibility string `@"public"?` Union string `"union" @Ident` } type decl struct { Class *cls `( @@` Union *union ` | @@ )` } type grammar struct { Decls []*decl `( @@ ";" )*` } p := mustTestParser[grammar](t, participle.UseLookahead(5)) ast, err := p.ParseString("", `public class A(B, C); class D; public union A;`) require.NoError(t, err) require.Equal(t, &grammar{Decls: []*decl{ {Class: &cls{Visibility: "public", Class: "A", Bases: []string{"B", "C"}}}, {Class: &cls{Class: "D"}}, {Union: &union{Visibility: "public", Union: "A"}}, }}, ast) _, err = p.ParseString("", `public struct Bar;`) require.EqualError(t, err, `1:8: unexpected token "struct" (expected "union" <ident>)`) _, err = p.ParseString("", `public class 1;`) require.EqualError(t, err, `1:14: unexpected token "1" (expected ("(" <ident> ("," <ident>)+ ")")?)`) _, err = p.ParseString("", `public class A(B,C,);`) require.EqualError(t, err, `1:20: unexpected token ")" (expected <ident>)`) } func TestMoreThanOneErrors(t *testing.T) { type unionMatchAtLeastOnce struct { Ident string `( @Ident ` String string `| @String+ ` Float float64 `| @Float )` } type union struct { Ident string `( @Ident ` String string `| @String ` Float float64 `| @Float )` } pAtLeastOnce := mustTestParser[unionMatchAtLeastOnce](t, participle.Unquote("String")) p := mustTestParser[union](t, participle.Unquote("String")) ast, err := pAtLeastOnce.ParseString("", `"a string" "two strings"`) require.NoError(t, err) require.Equal(t, &unionMatchAtLeastOnce{String: "a stringtwo strings"}, ast) _, err = p.ParseString("", `102`) require.EqualError(t, err, `1:1: unexpected token "102"`) _, err = pAtLeastOnce.ParseString("", `102`) // ensure we don't get a "+1:1: sub-expression <string>+ must match at least once" error require.EqualError(t, err, `1:1: unexpected token "102"`) } func TestErrorWrap(t *testing.T) { expected := 
errors.New("badbad") err := participle.Wrapf(lexer.Position{Line: 1, Column: 1}, expected, "bad: %s", "thing") require.Equal(t, expected, errors.Unwrap(err)) require.Equal(t, "1:1: bad: thing: badbad", err.Error()) } golang-github-alecthomas-participle-v2-2.1.4/go.mod000066400000000000000000000003061505300366400221510ustar00rootroot00000000000000module github.com/alecthomas/participle/v2 go 1.18 require ( github.com/alecthomas/assert/v2 v2.11.0 github.com/alecthomas/repr v0.4.0 ) require github.com/hexops/gotextdiff v1.0.3 // indirect golang-github-alecthomas-participle-v2-2.1.4/go.sum000066400000000000000000000013061505300366400221770ustar00rootroot00000000000000github.com/alecthomas/assert/v2 v2.6.0 h1:o3WJwILtexrEUk3cUVal3oiQY2tfgr/FHWiz/v2n4FU= github.com/alecthomas/assert/v2 v2.6.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0= github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc= github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= golang-github-alecthomas-participle-v2-2.1.4/grammar.go000066400000000000000000000252661505300366400230340ustar00rootroot00000000000000package participle import ( "fmt" "reflect" "text/scanner" "github.com/alecthomas/participle/v2/lexer" ) type generatorContext struct { lexer.Definition typeNodes map[reflect.Type]node symbolsToIDs map[lexer.TokenType]string } func newGeneratorContext(lex lexer.Definition) *generatorContext { return &generatorContext{ Definition: lex, typeNodes: map[reflect.Type]node{}, symbolsToIDs: lexer.SymbolsByRune(lex), } } func (g *generatorContext) addUnionDefs(defs []unionDef) error { unionNodes := make([]*union, len(defs)) for i, def := range defs { if _, exists := g.typeNodes[def.typ]; exists { return fmt.Errorf("duplicate definition for interface or union type %s", def.typ) } unionNode := &union{ unionDef: def, disjunction: disjunction{nodes: make([]node, 0, len(def.members))}, } g.typeNodes[def.typ], unionNodes[i] = unionNode, unionNode } for i, def := range defs { unionNode := unionNodes[i] for _, memberType := range def.members { memberNode, err := g.parseType(memberType) if err != nil { return err } unionNode.disjunction.nodes = append(unionNode.disjunction.nodes, memberNode) } } return nil } func (g *generatorContext) addCustomDefs(defs []customDef) error { for _, def := range defs { if _, exists := g.typeNodes[def.typ]; exists { return fmt.Errorf("duplicate definition for interface or union type %s", def.typ) } g.typeNodes[def.typ] = &custom{typ: def.typ, parseFn: def.parseFn} } return nil } // Takes a type and builds a tree of nodes out of it. 
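// For example (illustrative only), a grammar struct such as
//
//    type Pair struct {
//        Key   string `@Ident "="`
//        Value string `@Ident`
//    }
//
// is compiled to a strct node wrapping a sequence: capture(reference Ident),
// literal "=", capture(reference Ident).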
func (g *generatorContext) parseType(t reflect.Type) (_ node, returnedError error) { t = indirectType(t) if n, ok := g.typeNodes[t]; ok { if s, ok := n.(*strct); ok { s.usages++ } return n, nil } if t.Implements(parseableType) { return &parseable{t.Elem()}, nil } if reflect.PtrTo(t).Implements(parseableType) { return &parseable{t}, nil } switch t.Kind() { // nolint: exhaustive case reflect.Slice, reflect.Ptr: t = indirectType(t.Elem()) if t.Kind() != reflect.Struct { return nil, fmt.Errorf("expected a struct but got %T", t) } fallthrough case reflect.Struct: slexer, err := lexStruct(t) if err != nil { return nil, err } out := newStrct(t) g.typeNodes[t] = out // Ensure we avoid infinite recursion. if slexer.NumField() == 0 { return nil, fmt.Errorf("can not parse into empty struct %s", t) } defer decorate(&returnedError, func() string { return slexer.Field().Name }) e, err := g.parseDisjunction(slexer) if err != nil { return nil, err } if e == nil { return nil, fmt.Errorf("no grammar found in %s", t) } if token, _ := slexer.Peek(); !token.EOF() { return nil, fmt.Errorf("unexpected input %q", token.Value) } out.expr = e return out, nil } return nil, fmt.Errorf("%s should be a struct or should implement the Parseable interface", t) } func (g *generatorContext) parseDisjunction(slexer *structLexer) (node, error) { out := &disjunction{} for { n, err := g.parseSequence(slexer) if err != nil { return nil, err } if n == nil { return nil, fmt.Errorf("alternative expression %d cannot be empty", len(out.nodes)+1) } out.nodes = append(out.nodes, n) if token, _ := slexer.Peek(); token.Type != '|' { break } _, err = slexer.Next() // | if err != nil { return nil, err } } if len(out.nodes) == 1 { return out.nodes[0], nil } return out, nil } func (g *generatorContext) parseSequence(slexer *structLexer) (node, error) { head := &sequence{} cursor := head loop: for { if token, err := slexer.Peek(); err != nil { return nil, err } else if token.Type == lexer.EOF { break loop } term, err := g.parseTerm(slexer, true) if err != nil { return nil, err } if term == nil { break loop } if cursor.node == nil { cursor.head = true cursor.node = term } else { cursor.next = &sequence{node: term} cursor = cursor.next } } if head.node == nil { return nil, nil } if head.next == nil { return head.node, nil } return head, nil } func (g *generatorContext) parseTermNoModifiers(slexer *structLexer, allowUnknown bool) (node, error) { t, err := slexer.Peek() if err != nil { return nil, err } switch t.Type { case '@': return g.parseCapture(slexer) case scanner.String, scanner.RawString, scanner.Char: return g.parseLiteral(slexer) case '!', '~': return g.parseNegation(slexer) case '[': return g.parseOptional(slexer) case '{': return g.parseRepetition(slexer) case '(': // Also handles (? used for lookahead groups return g.parseGroup(slexer) case scanner.Ident: return g.parseReference(slexer) case lexer.EOF: _, _ = slexer.Next() return nil, nil default: if allowUnknown { return nil, nil } return nil, fmt.Errorf("unexpected token %v", t) } } func (g *generatorContext) parseTerm(slexer *structLexer, allowUnknown bool) (node, error) { out, err := g.parseTermNoModifiers(slexer, allowUnknown) if err != nil { return nil, err } return g.parseModifier(slexer, out) } // Parse modifiers: ?, *, + and/or ! 
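// For example (illustrative struct tags): `@Ident*` becomes a zero-or-more group, `(@Int ",")+` a one-or-more group, and `@Ident?` a zero-or-one group.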
func (g *generatorContext) parseModifier(slexer *structLexer, expr node) (node, error) { out := &group{expr: expr} t, err := slexer.Peek() if err != nil { return nil, err } switch t.Type { case '!': out.mode = groupMatchNonEmpty case '+': out.mode = groupMatchOneOrMore case '*': out.mode = groupMatchZeroOrMore case '?': out.mode = groupMatchZeroOrOne default: return expr, nil } _, _ = slexer.Next() return out, nil } // @<expr> captures <expr> into the current field. func (g *generatorContext) parseCapture(slexer *structLexer) (node, error) { _, _ = slexer.Next() token, err := slexer.Peek() if err != nil { return nil, err } field := slexer.Field() if token.Type == '@' { _, _ = slexer.Next() n, err := g.parseType(field.Type) if err != nil { return nil, err } return &capture{field, n}, nil } ft := indirectType(field.Type) if ft.Kind() == reflect.Struct && ft != tokenType && ft != tokensType && !implements(ft, captureType) && !implements(ft, textUnmarshalerType) { return nil, fmt.Errorf("%s: structs can only be parsed with @@ or by implementing the Capture or encoding.TextUnmarshaler interfaces", ft) } n, err := g.parseTermNoModifiers(slexer, false) if err != nil { return nil, err } return &capture{field, n}, nil } // A reference in the form <identifier> refers to a named token from the lexer. func (g *generatorContext) parseReference(slexer *structLexer) (node, error) { // nolint: interfacer token, err := slexer.Next() if err != nil { return nil, err } if token.Type != scanner.Ident { return nil, fmt.Errorf("expected identifier but got %q", token) } typ, ok := g.Symbols()[token.Value] if !ok { return nil, fmt.Errorf("unknown token type %q", token) } return &reference{typ: typ, identifier: token.Value}, nil } // [ <expr> ] optionally matches <expr>. func (g *generatorContext) parseOptional(slexer *structLexer) (node, error) { _, _ = slexer.Next() // [ disj, err := g.parseDisjunction(slexer) if err != nil { return nil, err } n := &group{expr: disj, mode: groupMatchZeroOrOne} next, err := slexer.Next() if err != nil { return nil, err } if next.Type != ']' { return nil, fmt.Errorf("expected ] but got %q", next) } return n, nil } // { <expr> } matches 0 or more repetitions of <expr> func (g *generatorContext) parseRepetition(slexer *structLexer) (node, error) { _, _ = slexer.Next() // { disj, err := g.parseDisjunction(slexer) if err != nil { return nil, err } n := &group{expr: disj, mode: groupMatchZeroOrMore} next, err := slexer.Next() if err != nil { return nil, err } if next.Type != '}' { return nil, fmt.Errorf("expected } but got %q", next) } return n, nil } // ( <expr> ) groups a sub-expression func (g *generatorContext) parseGroup(slexer *structLexer) (node, error) { _, _ = slexer.Next() // ( peek, err := slexer.Peek() if err != nil { return nil, err } if peek.Type == '?' { return g.subparseLookaheadGroup(slexer) // If there was an error peeking, code below will handle it } expr, err := g.subparseGroup(slexer) if err != nil { return nil, err } return &group{expr: expr}, nil } // (?[!=] <expr> ) requires a grouped sub-expression either matches or doesn't match, without consuming it func (g *generatorContext) subparseLookaheadGroup(slexer *structLexer) (node, error) { _, _ = slexer.Next() // ? - the opening ( was already consumed in parseGroup var negative bool next, err := slexer.Next() if err != nil { return nil, err } switch next.Type { case '=': negative = false case '!': negative = true default: return nil, fmt.Errorf("expected = or ! 
but got %q", next) } expr, err := g.subparseGroup(slexer) if err != nil { return nil, err } return &lookaheadGroup{expr: expr, negative: negative}, nil } // helper parsing ) to finish parsing groups or lookahead groups func (g *generatorContext) subparseGroup(slexer *structLexer) (node, error) { disj, err := g.parseDisjunction(slexer) if err != nil { return nil, err } next, err := slexer.Next() // ) if err != nil { return nil, err } if next.Type != ')' { return nil, fmt.Errorf("expected ) but got %q", next) } return disj, nil } // A token negation // // Accepts both the form !"some-literal" and !SomeNamedToken func (g *generatorContext) parseNegation(slexer *structLexer) (node, error) { _, _ = slexer.Next() // advance the parser since we have '!' right now. next, err := g.parseTermNoModifiers(slexer, false) if err != nil { return nil, err } return &negation{next}, nil } // A literal string. // // Note that for this to match, the tokeniser must be able to produce this string. For example, // if the tokeniser only produces individual characters but the literal is "hello", or vice versa. func (g *generatorContext) parseLiteral(lex *structLexer) (node, error) { // nolint: interfacer token, err := lex.Next() if err != nil { return nil, err } s := token.Value t := lexer.TokenType(-1) token, err = lex.Peek() if err != nil { return nil, err } if token.Type == ':' { _, _ = lex.Next() token, err = lex.Next() if err != nil { return nil, err } if token.Type != scanner.Ident { return nil, fmt.Errorf("expected identifier for literal type constraint but got %q", token) } var ok bool t, ok = g.Symbols()[token.Value] if !ok { return nil, fmt.Errorf("unknown token type %q in literal type constraint", token) } } return &literal{s: s, t: t, tt: g.symbolsToIDs[t]}, nil } func indirectType(t reflect.Type) reflect.Type { if t.Kind() == reflect.Ptr || t.Kind() == reflect.Slice { return indirectType(t.Elem()) } return t } func implements(t, i reflect.Type) bool { return t.Implements(i) || reflect.PtrTo(t).Implements(i) } golang-github-alecthomas-participle-v2-2.1.4/grammar_test.go000066400000000000000000000033751505300366400240700ustar00rootroot00000000000000package participle_test import ( "testing" require "github.com/alecthomas/assert/v2" "github.com/alecthomas/participle/v2" ) func TestBuild_Errors_Negation(t *testing.T) { type grammar struct { Whatever string `'a' | ! | 'b'` } _, err := participle.Build[grammar]() require.EqualError(t, err, "Whatever: unexpected token |") } func TestBuild_Errors_Capture(t *testing.T) { type grammar struct { Whatever string `'a' | @ | 'b'` } _, err := participle.Build[grammar]() require.EqualError(t, err, "Whatever: unexpected token |") } func TestBuild_Errors_UnclosedGroup(t *testing.T) { type grammar struct { Whatever string `'a' | ('b' | 'c'` } _, err := participle.Build[grammar]() require.EqualError(t, err, `Whatever: expected ) but got ""`) } func TestBuild_Errors_LookaheadGroup(t *testing.T) { type grammar struct { Whatever string `'a' | (?? 'what') | 'b'` } _, err := participle.Build[grammar]() require.EqualError(t, err, `Whatever: expected = or ! 
but got "?"`) } func TestBuild_Colon_OK(t *testing.T) { type grammar struct { TokenTypeTest bool ` 'TokenTypeTest' : Ident` DoubleCapture string `| 'DoubleCapture' ":" @Ident` SinglePresent bool `| 'SinglePresent' ':' Ident` SingleCapture string `| 'SingleCapture' ':' @Ident` } parser, err := participle.Build[grammar]() require.NoError(t, err) require.Equal(t, `Grammar = "TokenTypeTest"`+ ` | ("DoubleCapture" ":" )`+ ` | ("SinglePresent" ":" )`+ ` | ("SingleCapture" ":" ) .`, parser.String()) } func TestBuild_Colon_MissingTokenType(t *testing.T) { type grammar struct { Key string `'name' : @Ident` } _, err := participle.Build[grammar]() require.EqualError(t, err, `Key: expected identifier for literal type constraint but got "@"`) } golang-github-alecthomas-participle-v2-2.1.4/lexer/000077500000000000000000000000001505300366400221635ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/lexer/api.go000066400000000000000000000107551505300366400232730ustar00rootroot00000000000000package lexer import ( "fmt" "io" "strings" "unicode/utf8" ) type TokenType int const ( // EOF represents an end of file. EOF TokenType = -(iota + 1) ) // EOFToken creates a new EOF token at the given position. func EOFToken(pos Position) Token { return Token{Type: EOF, Pos: pos} } // Definition is the main entry point for lexing. type Definition interface { // Symbols returns a map of symbolic names to the corresponding pseudo-runes for those symbols. // This is the same approach as used by text/scanner. For example, "EOF" might have the rune // value of -1, "Ident" might be -2, and so on. Symbols() map[string]TokenType // Lex an io.Reader. Lex(filename string, r io.Reader) (Lexer, error) } // StringDefinition is an optional interface lexer Definition's can implement // to offer a fast path for lexing strings. type StringDefinition interface { LexString(filename string, input string) (Lexer, error) } // BytesDefinition is an optional interface lexer Definition's can implement // to offer a fast path for lexing byte slices. type BytesDefinition interface { LexBytes(filename string, input []byte) (Lexer, error) } // A Lexer returns tokens from a source. type Lexer interface { // Next consumes and returns the next token. Next() (Token, error) } // SymbolsByRune returns a map of lexer symbol names keyed by rune. func SymbolsByRune(def Definition) map[TokenType]string { symbols := def.Symbols() out := make(map[TokenType]string, len(symbols)) for s, r := range symbols { out[r] = s } return out } // NameOfReader attempts to retrieve the filename of a reader. func NameOfReader(r interface{}) string { if nr, ok := r.(interface{ Name() string }); ok { return nr.Name() } return "" } // Must takes the result of a Definition constructor call and returns the definition, but panics if // it errors // // eg. // // lex = lexer.Must(lexer.Build(`Symbol = "symbol" .`)) func Must(def Definition, err error) Definition { if err != nil { panic(err) } return def } // ConsumeAll reads all tokens from a Lexer. func ConsumeAll(lexer Lexer) ([]Token, error) { tokens := make([]Token, 0, 1024) for { token, err := lexer.Next() if err != nil { return nil, err } tokens = append(tokens, token) if token.Type == EOF { return tokens, nil } } } // Position of a token. type Position struct { Filename string Offset int Line int Column int } // Advance the Position based on the number of characters and newlines in "span". 
func (p *Position) Advance(span string) { p.Offset += len(span) lines := strings.Count(span, "\n") p.Line += lines // Update column. if lines == 0 { p.Column += utf8.RuneCountInString(span) } else { p.Column = utf8.RuneCountInString(span[strings.LastIndex(span, "\n"):]) } } // Add returns a new Position that is the sum of this position and "pos". // // This is useful when parsing values from a parent grammar. func (p Position) Add(pos Position) Position { p.Line += pos.Line - 1 if pos.Line > 1 { p.Column = pos.Column } else { p.Column += pos.Column - 1 } p.Offset += pos.Offset return p } func (p Position) GoString() string { return fmt.Sprintf("Position{Filename: %q, Offset: %d, Line: %d, Column: %d}", p.Filename, p.Offset, p.Line, p.Column) } func (p Position) String() string { filename := p.Filename if filename == "" { return fmt.Sprintf("%d:%d", p.Line, p.Column) } return fmt.Sprintf("%s:%d:%d", filename, p.Line, p.Column) } // A Token returned by a Lexer. type Token struct { // Type of token. This is the value keyed by symbol as returned by Definition.Symbols(). Type TokenType Value string Pos Position } // EOF returns true if this Token is an EOF token. func (t Token) EOF() bool { return t.Type == EOF } func (t Token) String() string { if t.EOF() { return "" } return t.Value } func (t Token) GoString() string { if t.Pos == (Position{}) { return fmt.Sprintf("Token{%d, %q}", t.Type, t.Value) } return fmt.Sprintf("Token@%s{%d, %q}", t.Pos.String(), t.Type, t.Value) } // MakeSymbolTable builds a lookup table for checking token ID existence. // // For each symbolic name in "types", the returned map will contain the corresponding token ID as a key. func MakeSymbolTable(def Definition, types ...string) (map[TokenType]bool, error) { symbols := def.Symbols() table := make(map[TokenType]bool, len(types)) for _, symbol := range types { rn, ok := symbols[symbol] if !ok { return nil, fmt.Errorf("lexer does not support symbol %q", symbol) } table[rn] = true } return table, nil } golang-github-alecthomas-participle-v2-2.1.4/lexer/doc.go000066400000000000000000000024731505300366400232650ustar00rootroot00000000000000// Package lexer defines interfaces and implementations used by Participle to perform lexing. // // The primary interfaces are Definition and Lexer. There are two concrete implementations // included. The first is one based on Go's text/scanner package. The second is Participle's // default stateful/modal lexer. // // The stateful lexer is based heavily on the approach used by Chroma (and Pygments). // // It is a state machine defined by a map of rules keyed by state. Each rule // is a named regex and optional operation to apply when the rule matches. // // As a convenience, any Rule starting with a lowercase letter will be elided from output. // // Lexing starts in the "Root" group. Each rule is matched in order, with the first // successful match producing a lexeme. If the matching rule has an associated Action // it will be executed. // // A state change can be introduced with the Action `Push(state)`. `Pop()` will // return to the previous state. // // To reuse rules from another state, use `Include(state)`. // // As a special case, regexes containing backrefs in the form \N (where N is a digit) // will match the corresponding capture group from the immediate parent group. This // can be used to parse, among other things, heredocs. // // See the README, example and tests in this package for details. 
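//
// A minimal illustrative rule set (a sketch using the Rules/Push/Pop API from
// stateful.go, mirroring the tests in this package):
//
//	var def = MustStateful(Rules{
//		"Root": {
//			{"String", `"`, Push("String")},
//		},
//		"String": {
//			{"Escaped", `\\.`, nil},
//			{"StringEnd", `"`, Pop()},
//			{"Char", `[^"\\]+`, nil},
//		},
//	})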
package lexer golang-github-alecthomas-participle-v2-2.1.4/lexer/errors.go000066400000000000000000000022771505300366400240360ustar00rootroot00000000000000package lexer import "fmt" // This file exists to break circular imports. The types and functions in here // mirror those in the participle package. type errorInterface interface { error Message() string Position() Position } // Error represents an error while lexing. // // It complies with the participle.Error interface. type Error struct { Msg string Pos Position } var _ errorInterface = &Error{} // Creates a new Error at the given position. func errorf(pos Position, format string, args ...interface{}) *Error { return &Error{Msg: fmt.Sprintf(format, args...), Pos: pos} } func (e *Error) Message() string { return e.Msg } // nolint: golint func (e *Error) Position() Position { return e.Pos } // nolint: golint // Error formats the error with FormatError. func (e *Error) Error() string { return formatError(e.Pos, e.Msg) } // An error in the form "[<filename>:][<line>:<column>:] <message>" func formatError(pos Position, message string) string { msg := "" if pos.Filename != "" { msg += pos.Filename + ":" } if pos.Line != 0 || pos.Column != 0 { msg += fmt.Sprintf("%d:%d:", pos.Line, pos.Column) } if msg != "" { msg += " " + message } else { msg = message } return msg } golang-github-alecthomas-participle-v2-2.1.4/lexer/internal/000077500000000000000000000000001505300366400237775ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/lexer/internal/basiclexer.go000066400000000000000000000245751505300366400264600ustar00rootroot00000000000000// Code generated by Participle. DO NOT EDIT. package internal import ( "fmt" "io" "regexp/syntax" "strings" "sync" "unicode/utf8" "github.com/alecthomas/participle/v2" "github.com/alecthomas/participle/v2/lexer" ) var _ syntax.Op var _ fmt.State const _ = utf8.RuneError var GeneratedBasicBackRefCache sync.Map var GeneratedBasicLexer lexer.Definition = lexerGeneratedBasicDefinitionImpl{} type lexerGeneratedBasicDefinitionImpl struct{} func (lexerGeneratedBasicDefinitionImpl) Symbols() map[string]lexer.TokenType { return map[string]lexer.TokenType{ "Comment": -7, "EOF": -1, "EOL": -6, "Ident": -4, "Number": -3, "Punct": -5, "String": -2, "Whitespace": -8, } } func (lexerGeneratedBasicDefinitionImpl) LexString(filename string, s string) (lexer.Lexer, error) { return &lexerGeneratedBasicImpl{ s: s, pos: lexer.Position{ Filename: filename, Line: 1, Column: 1, }, states: []lexerGeneratedBasicState{{name: "Root"}}, }, nil } func (d lexerGeneratedBasicDefinitionImpl) LexBytes(filename string, b []byte) (lexer.Lexer, error) { return d.LexString(filename, string(b)) } func (d lexerGeneratedBasicDefinitionImpl) Lex(filename string, r io.Reader) (lexer.Lexer, error) { s := &strings.Builder{} _, err := io.Copy(s, r) if err != nil { return nil, err } return d.LexString(filename, s.String()) } type lexerGeneratedBasicState struct { name string groups []string } type lexerGeneratedBasicImpl struct { s string p int pos lexer.Position states []lexerGeneratedBasicState } func (l *lexerGeneratedBasicImpl) Next() (lexer.Token, error) { if l.p == len(l.s) { return lexer.EOFToken(l.pos), nil } var ( state = l.states[len(l.states)-1] groups []int sym lexer.TokenType ) switch state.name { case "Root": if match := matchGeneratedBasicString(l.s, l.p, l.states[len(l.states)-1].groups); match[1] != 0 { sym = -2 groups = match[:] } else if match := matchGeneratedBasicNumber(l.s, l.p, l.states[len(l.states)-1].groups); match[1] != 0 { sym = -3 groups = 
match[:] } else if match := matchGeneratedBasicIdent(l.s, l.p, l.states[len(l.states)-1].groups); match[1] != 0 { sym = -4 groups = match[:] } else if match := matchGeneratedBasicPunct(l.s, l.p, l.states[len(l.states)-1].groups); match[1] != 0 { sym = -5 groups = match[:] } else if match := matchGeneratedBasicEOL(l.s, l.p, l.states[len(l.states)-1].groups); match[1] != 0 { sym = -6 groups = match[:] } else if match := matchGeneratedBasicComment(l.s, l.p, l.states[len(l.states)-1].groups); match[1] != 0 { sym = -7 groups = match[:] } else if match := matchGeneratedBasicWhitespace(l.s, l.p, l.states[len(l.states)-1].groups); match[1] != 0 { sym = -8 groups = match[:] } } if groups == nil { sample := []rune(l.s[l.p:]) if len(sample) > 16 { sample = append(sample[:16], []rune("...")...) } return lexer.Token{}, participle.Errorf(l.pos, "invalid input text %q", string(sample)) } pos := l.pos span := l.s[groups[0]:groups[1]] l.p = groups[1] l.pos.Advance(span) return lexer.Token{ Type: sym, Value: span, Pos: pos, }, nil } func (l *lexerGeneratedBasicImpl) sgroups(match []int) []string { sgroups := make([]string, len(match)/2) for i := 0; i < len(match)-1; i += 2 { sgroups[i/2] = l.s[l.p+match[i] : l.p+match[i+1]] } return sgroups } // "(\\"|[^"])*" func matchGeneratedBasicString(s string, p int, backrefs []string) (groups [4]int) { // " (Literal) l0 := func(s string, p int) int { if p < len(s) && s[p] == '"' { return p + 1 } return -1 } // \\" (Literal) l1 := func(s string, p int) int { if p+2 <= len(s) && s[p:p+2] == "\\\"" { return p + 2 } return -1 } // [^"] (CharClass) l2 := func(s string, p int) int { if len(s) <= p { return -1 } var ( rn rune n int ) if s[p] < utf8.RuneSelf { rn, n = rune(s[p]), 1 } else { rn, n = utf8.DecodeRuneInString(s[p:]) } switch { case rn >= '\x00' && rn <= '!': return p + 1 case rn >= '#' && rn <= '\U0010ffff': return p + n } return -1 } // \\"|[^"] (Alternate) l3 := func(s string, p int) int { if np := l1(s, p); np != -1 { return np } if np := l2(s, p); np != -1 { return np } return -1 } // (\\"|[^"]) (Capture) l4 := func(s string, p int) int { np := l3(s, p) if np != -1 { groups[2] = p groups[3] = np } return np } // (\\"|[^"])* (Star) l5 := func(s string, p int) int { for len(s) > p { if np := l4(s, p); np == -1 { return p } else { p = np } } return p } // "(\\"|[^"])*" (Concat) l6 := func(s string, p int) int { if p = l0(s, p); p == -1 { return -1 } if p = l5(s, p); p == -1 { return -1 } if p = l0(s, p); p == -1 { return -1 } return p } np := l6(s, p) if np == -1 { return } groups[0] = p groups[1] = np return } // [\+\-]?([0-9]*\.)?[0-9]+ func matchGeneratedBasicNumber(s string, p int, backrefs []string) (groups [4]int) { // [\+\-] (CharClass) l0 := func(s string, p int) int { if len(s) <= p { return -1 } rn := s[p] if rn == '+' || rn == '-' { return p + 1 } return -1 } // [\+\-]? (Quest) l1 := func(s string, p int) int { if np := l0(s, p); np != -1 { return np } return p } // [0-9] (CharClass) l2 := func(s string, p int) int { if len(s) <= p { return -1 } rn := s[p] switch { case rn >= '0' && rn <= '9': return p + 1 } return -1 } // [0-9]* (Star) l3 := func(s string, p int) int { for len(s) > p { if np := l2(s, p); np == -1 { return p } else { p = np } } return p } // \. (Literal) l4 := func(s string, p int) int { if p < len(s) && s[p] == '.' { return p + 1 } return -1 } // [0-9]*\. (Concat) l5 := func(s string, p int) int { if p = l3(s, p); p == -1 { return -1 } if p = l4(s, p); p == -1 { return -1 } return p } // ([0-9]*\.) 
(Capture) l6 := func(s string, p int) int { np := l5(s, p) if np != -1 { groups[2] = p groups[3] = np } return np } // ([0-9]*\.)? (Quest) l7 := func(s string, p int) int { if np := l6(s, p); np != -1 { return np } return p } // [0-9]+ (Plus) l8 := func(s string, p int) int { if p = l2(s, p); p == -1 { return -1 } for len(s) > p { if np := l2(s, p); np == -1 { return p } else { p = np } } return p } // [\+\-]?([0-9]*\.)?[0-9]+ (Concat) l9 := func(s string, p int) int { if p = l1(s, p); p == -1 { return -1 } if p = l7(s, p); p == -1 { return -1 } if p = l8(s, p); p == -1 { return -1 } return p } np := l9(s, p) if np == -1 { return } groups[0] = p groups[1] = np return } // [A-Z_a-z][0-9A-Z_a-z]* func matchGeneratedBasicIdent(s string, p int, backrefs []string) (groups [2]int) { // [A-Z_a-z] (CharClass) l0 := func(s string, p int) int { if len(s) <= p { return -1 } rn := s[p] switch { case rn >= 'A' && rn <= 'Z': return p + 1 case rn == '_': return p + 1 case rn >= 'a' && rn <= 'z': return p + 1 } return -1 } // [0-9A-Z_a-z] (CharClass) l1 := func(s string, p int) int { if len(s) <= p { return -1 } rn := s[p] switch { case rn >= '0' && rn <= '9': return p + 1 case rn >= 'A' && rn <= 'Z': return p + 1 case rn == '_': return p + 1 case rn >= 'a' && rn <= 'z': return p + 1 } return -1 } // [0-9A-Z_a-z]* (Star) l2 := func(s string, p int) int { for len(s) > p { if np := l1(s, p); np == -1 { return p } else { p = np } } return p } // [A-Z_a-z][0-9A-Z_a-z]* (Concat) l3 := func(s string, p int) int { if p = l0(s, p); p == -1 { return -1 } if p = l2(s, p); p == -1 { return -1 } return p } np := l3(s, p) if np == -1 { return } groups[0] = p groups[1] = np return } // [!-/:-@\[-`\{-~]+ func matchGeneratedBasicPunct(s string, p int, backrefs []string) (groups [2]int) { // [!-/:-@\[-`\{-~] (CharClass) l0 := func(s string, p int) int { if len(s) <= p { return -1 } rn := s[p] switch { case rn >= '!' 
&& rn <= '/': return p + 1 case rn >= ':' && rn <= '@': return p + 1 case rn >= '[' && rn <= '`': return p + 1 case rn >= '{' && rn <= '~': return p + 1 } return -1 } // [!-/:-@\[-`\{-~]+ (Plus) l1 := func(s string, p int) int { if p = l0(s, p); p == -1 { return -1 } for len(s) > p { if np := l0(s, p); np == -1 { return p } else { p = np } } return p } np := l1(s, p) if np == -1 { return } groups[0] = p groups[1] = np return } // \n func matchGeneratedBasicEOL(s string, p int, backrefs []string) (groups [2]int) { if p < len(s) && s[p] == '\n' { groups[0] = p groups[1] = p + 1 } return } // (?i:REM)[^\n]*(?i:\n) func matchGeneratedBasicComment(s string, p int, backrefs []string) (groups [2]int) { // (?i:REM) (Literal) l0 := func(s string, p int) int { if p+3 <= len(s) && strings.EqualFold(s[p:p+3], "REM") { return p + 3 } return -1 } // [^\n] (CharClass) l1 := func(s string, p int) int { if len(s) <= p { return -1 } var ( rn rune n int ) if s[p] < utf8.RuneSelf { rn, n = rune(s[p]), 1 } else { rn, n = utf8.DecodeRuneInString(s[p:]) } switch { case rn >= '\x00' && rn <= '\t': return p + 1 case rn >= '\v' && rn <= '\U0010ffff': return p + n } return -1 } // [^\n]* (Star) l2 := func(s string, p int) int { for len(s) > p { if np := l1(s, p); np == -1 { return p } else { p = np } } return p } // (?i:\n) (Literal) l3 := func(s string, p int) int { if p < len(s) && s[p] == '\n' { return p + 1 } return -1 } // (?i:REM)[^\n]*(?i:\n) (Concat) l4 := func(s string, p int) int { if p = l0(s, p); p == -1 { return -1 } if p = l2(s, p); p == -1 { return -1 } if p = l3(s, p); p == -1 { return -1 } return p } np := l4(s, p) if np == -1 { return } groups[0] = p groups[1] = np return } // [\t ]+ func matchGeneratedBasicWhitespace(s string, p int, backrefs []string) (groups [2]int) { // [\t ] (CharClass) l0 := func(s string, p int) int { if len(s) <= p { return -1 } rn := s[p] if rn == '\t' || rn == ' ' { return p + 1 } return -1 } // [\t ]+ (Plus) l1 := func(s string, p int) int { if p = l0(s, p); p == -1 { return -1 } for len(s) > p { if np := l0(s, p); np == -1 { return p } else { p = np } } return p } np := l1(s, p) if np == -1 { return } groups[0] = p groups[1] = np return } golang-github-alecthomas-participle-v2-2.1.4/lexer/internal/basiclexer.json000066400000000000000000000007761505300366400270250ustar00rootroot00000000000000{ "Root": [ { "name": "String", "pattern": "\"(\\\\\"|[^\"])*\"" }, { "name": "Number", "pattern": "[-+]?(\\d*\\.)?\\d+" }, { "name": "Ident", "pattern": "[a-zA-Z_]\\w*" }, { "name": "Punct", "pattern": "[!-/:-@[-`{-~]+" }, { "name": "EOL", "pattern": "\\n" }, { "name": "Comment", "pattern": "(?i)rem[^\\n]*\\n" }, { "name": "Whitespace", "pattern": "[ \\t]+" } ] }golang-github-alecthomas-participle-v2-2.1.4/lexer/internal/conformance/000077500000000000000000000000001505300366400262715ustar00rootroot00000000000000golang-github-alecthomas-participle-v2-2.1.4/lexer/internal/conformance/conformance_codegen_test.go000066400000000000000000000004711505300366400336370ustar00rootroot00000000000000//go:build generated package conformance_test import ( "testing" "github.com/alecthomas/participle/v2/lexer/internal/conformance" ) // This should only be run by TestLexerConformanceGenerated. 
func TestLexerConformanceGeneratedInternal(t *testing.T) { testLexer(t, conformance.GeneratedConformanceLexer) } golang-github-alecthomas-participle-v2-2.1.4/lexer/internal/conformance/conformance_test.go000066400000000000000000000161261505300366400321570ustar00rootroot00000000000000package conformance_test import ( "encoding/json" "flag" "fmt" "os" "os/exec" "path/filepath" "strings" "testing" "github.com/alecthomas/assert/v2" "github.com/alecthomas/participle/v2/lexer" ) var conformanceLexer = lexer.MustStateful(lexer.Rules{ "Root": { {"ExprTest", `EXPRTEST:`, lexer.Push("ExprTest")}, {"LiteralTest", `LITTEST:`, lexer.Push("LiteralTest")}, {"CaseInsensitiveTest", `CITEST:`, lexer.Push("CaseInsensitiveTest")}, // Use this to test \b at very start of the string! {"WordBoundaryTest", `\bWBTEST:`, lexer.Push("WordBoundaryTest")}, }, "ExprTest": { {"ExprString", `"`, lexer.Push("ExprString")}, // {"ExprHeredoc", `<<(\w+)`, lexer.Push("ExprHeredoc")}, }, "ExprString": { {"ExprEscaped", `\\.`, nil}, {"ExprStringEnd", `"`, lexer.Pop()}, {"Expr", `\${`, lexer.Push("Expr")}, {"ExprChar", `[^$"\\]+`, nil}, }, "Expr": { lexer.Include("ExprTest"), {`Whitespace`, `\s+`, nil}, {`ExprOper`, `[-+/*%]`, nil}, {"Ident", `\w+`, lexer.Push("ExprReference")}, {"ExprEnd", `}`, lexer.Pop()}, }, "ExprReference": { {"ExprDot", `\.`, nil}, {"Ident", `\w+`, nil}, lexer.Return(), }, // "ExprHeredoc": { // {"ExprHeredocEnd", `\1`, lexer.Pop()}, // lexer.Include("Expr"), // }, "LiteralTest": { {`LITOne`, `ONE`, nil}, {`LITKeyword`, `SELECT|FROM|WHERE|LIKE`, nil}, {"Ident", `\w+`, nil}, {"Whitespace", `\s+`, nil}, }, "CaseInsensitiveTest": { {`ABCWord`, `[aA][bB][cC]`, nil}, {`CIKeyword`, `(?i)(SELECT|from|WHERE|LIKE)`, nil}, {"Ident", `\w+`, nil}, {"Whitespace", `\s+`, nil}, }, "WordBoundaryTest": { {`WBKeyword`, `\b(?:abc|xyz)\b`, nil}, {`WBGroupKeyword`, `(?:90|0)\b`, nil}, {"Slash", `/`, nil}, {"Ident", `\w+`, nil}, {"Whitespace", `\s+`, nil}, }, }) type token struct { Type string Value string } func testLexer(t *testing.T, lex lexer.Definition) { t.Helper() tests := []struct { name string input string expected []token }{ {"ExprPush", `EXPRTEST:"${"Hello ${name + "!"}"}"`, []token{ {"ExprString", "\""}, {"Expr", "${"}, {"ExprString", "\""}, {"ExprChar", "Hello "}, {"Expr", "${"}, {"Ident", "name"}, {"Whitespace", " "}, {"ExprOper", "+"}, {"Whitespace", " "}, {"ExprString", "\""}, {"ExprChar", "!"}, {"ExprStringEnd", "\""}, {"ExprEnd", "}"}, {"ExprStringEnd", "\""}, {"ExprEnd", "}"}, {"ExprStringEnd", "\""}, }}, {"ExprReference", `EXPRTEST:"${user.name}"`, []token{ {"ExprString", "\""}, {"Expr", "${"}, {"Ident", "user"}, {"ExprDot", "."}, {"Ident", "name"}, {"ExprEnd", "}"}, {"ExprStringEnd", "\""}, }}, // TODO(alecthomas): Once backreferences are supported, this will work. // {"Backref", `EXPRTEST:<*regexp.Regexp backrefCache sync.Map matchLongest bool } // MustStateful creates a new stateful lexer and panics if it is incorrect. func MustStateful(rules Rules) *StatefulDefinition { def, err := New(rules) if err != nil { panic(err) } return def } // New constructs a new stateful lexer from rules. 
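//
// For example (a sketch mirroring the rules used by the tests in this package):
//
//	def, err := New(Rules{
//		"Root": {
//			{"Ident", `\w+`, nil},
//			{"whitespace", `\s+`, nil}, // lower-case first letter: matched but elided from output
//		},
//	})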
func New(rules Rules) (*StatefulDefinition, error) { compiled := compiledRules{} for key, set := range rules { for i, rule := range set { if validate, ok := rule.Action.(validatingRule); ok { if err := validate.validate(rules); err != nil { return nil, fmt.Errorf("lexer: invalid action for rule %q: %w", rule.Name, err) } } pattern := "^(?:" + rule.Pattern + ")" var ( re *regexp.Regexp err error ) var match = backrefReplace.FindStringSubmatch(rule.Pattern) if match == nil || len(match[1])%2 == 0 { re, err = regexp.Compile(pattern) if err != nil { return nil, fmt.Errorf("lexer: %s.%d: %s", key, i, err) } } compiled[key] = append(compiled[key], compiledRule{ Rule: rule, ignore: len(rule.Name) > 0 && unicode.IsLower(rune(rule.Name[0])), RE: re, }) } } restart: for state, rules := range compiled { for i, rule := range rules { if action, ok := rule.Action.(RulesAction); ok { if err := action.applyRules(state, i, compiled); err != nil { return nil, fmt.Errorf("lexer: %s.%d: %s", state, i, err) } goto restart } } } keys := make([]string, 0, len(compiled)) for key := range compiled { keys = append(keys, key) } symbols := map[string]TokenType{ "EOF": EOF, } sort.Strings(keys) duplicates := map[string]compiledRule{} rn := EOF - 1 for _, key := range keys { for i, rule := range compiled[key] { if dup, ok := duplicates[rule.Name]; ok && rule.Pattern != dup.Pattern { panic(fmt.Sprintf("lexer: duplicate key %q with different patterns %q != %q", rule.Name, rule.Pattern, dup.Pattern)) } duplicates[rule.Name] = rule compiled[key][i] = rule symbols[rule.Name] = rn rn-- } } d := &StatefulDefinition{ rules: compiled, symbols: symbols, } return d, nil } func (d *StatefulDefinition) MarshalJSON() ([]byte, error) { return json.Marshal(d.rules) } // Rules returns the user-provided Rules used to construct the lexer. func (d *StatefulDefinition) Rules() Rules { out := Rules{} for state, rules := range d.rules { for _, rule := range rules { out[state] = append(out[state], rule.Rule) } } return out } // LexString is a fast-path implementation for lexing strings. func (d *StatefulDefinition) LexString(filename string, s string) (Lexer, error) { return &StatefulLexer{ def: d, data: s, stack: []lexerState{{name: "Root"}}, pos: Position{ Filename: filename, Line: 1, Column: 1, }, }, nil } func (d *StatefulDefinition) Lex(filename string, r io.Reader) (Lexer, error) { // nolint: golint w := &strings.Builder{} _, err := io.Copy(w, r) if err != nil { return nil, err } return d.LexString(filename, w.String()) } func (d *StatefulDefinition) Symbols() map[string]TokenType { // nolint: golint return d.symbols } // lexerState stored when switching states in the lexer. type lexerState struct { name string groups []string } // StatefulLexer implementation. type StatefulLexer struct { stack []lexerState def *StatefulDefinition data string pos Position } func (l *StatefulLexer) Next() (Token, error) { // nolint: golint parent := l.stack[len(l.stack)-1] rules := l.def.rules[parent.name] next: for len(l.data) > 0 { var ( rule *compiledRule m []int match []int ) for i, candidate := range rules { // Special case "Return()". 
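// Return() pops the state stack rather than matching input, so switch to the parent state's rules and restart candidate matching.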
if candidate.Rule == ReturnRule { l.stack = l.stack[:len(l.stack)-1] parent = l.stack[len(l.stack)-1] rules = l.def.rules[parent.name] continue next } re, err := l.getPattern(candidate) if err != nil { return Token{}, errorf(l.pos, "lexer: rule %q: %s", candidate.Name, err) } m = re.FindStringSubmatchIndex(l.data) if m != nil && (match == nil || m[1] > match[1]) { match = m rule = &rules[i] if !l.def.matchLongest { break } } } if match == nil || rule == nil { sample := []rune(l.data) if len(sample) > 16 { sample = append(sample[:16], []rune("...")...) } return Token{}, errorf(l.pos, "lexer: invalid input text %q", string(sample)) } if rule.Action != nil { groups := make([]string, 0, len(match)/2) for i := 0; i < len(match); i += 2 { groups = append(groups, l.data[match[i]:match[i+1]]) } if err := rule.Action.applyAction(l, groups); err != nil { return Token{}, errorf(l.pos, "lexer: rule %q: %s", rule.Name, err) } } else if match[0] == match[1] { return Token{}, errorf(l.pos, "lexer: rule %q did not match any input", rule.Name) } span := l.data[match[0]:match[1]] l.data = l.data[match[1]:] // l.groups = groups // Update position. pos := l.pos l.pos.Advance(span) if rule.ignore { parent = l.stack[len(l.stack)-1] rules = l.def.rules[parent.name] continue } return Token{ Type: l.def.symbols[rule.Name], Value: span, Pos: pos, }, nil } return EOFToken(l.pos), nil } func (l *StatefulLexer) getPattern(candidate compiledRule) (*regexp.Regexp, error) { if candidate.RE != nil { return candidate.RE, nil } // We don't have a compiled RE. This means there are back-references // that need to be substituted first. return BackrefRegex(&l.def.backrefCache, candidate.Pattern, l.stack[len(l.stack)-1].groups) } // BackrefRegex returns a compiled regular expression with backreferences replaced by groups. func BackrefRegex(backrefCache *sync.Map, input string, groups []string) (*regexp.Regexp, error) { key := input + "\000" + strings.Join(groups, "\000") cached, ok := backrefCache.Load(key) if ok { return cached.(*regexp.Regexp), nil } var ( re *regexp.Regexp err error ) pattern := backrefReplace.ReplaceAllStringFunc(input, func(s string) string { var rematch = backrefReplace.FindStringSubmatch(s) n, nerr := strconv.ParseInt(rematch[2], 10, 64) if nerr != nil { err = nerr return s } if len(groups) == 0 || int(n) >= len(groups) { err = fmt.Errorf("invalid group %d from parent with %d groups", n, len(groups)) return s } // concatenate the leading \\\\ which are already escaped to the quoted match. 
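// rematch[1] is the run of leading backslashes; the final one introduced the backref, so drop it and splice in the quoted group text in its place.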
return rematch[1][:len(rematch[1])-1] + regexp.QuoteMeta(groups[n]) }) if err == nil { re, err = regexp.Compile("^(?:" + pattern + ")") } if err != nil { return nil, fmt.Errorf("invalid backref expansion: %q: %s", pattern, err) } backrefCache.Store(key, re) return re, nil } golang-github-alecthomas-participle-v2-2.1.4/lexer/stateful_test.go000066400000000000000000000255111505300366400254040ustar00rootroot00000000000000package lexer_test import ( "encoding/json" "log" "strings" "testing" require "github.com/alecthomas/assert/v2" "github.com/alecthomas/participle/v2" "github.com/alecthomas/participle/v2/lexer" "github.com/alecthomas/participle/v2/lexer/internal" "github.com/alecthomas/repr" ) var interpolatedRules = lexer.Rules{ "Root": { {`String`, `"`, lexer.Push("String")}, }, "String": { {"Escaped", `\\.`, nil}, {"StringEnd", `"`, lexer.Pop()}, {"Expr", `\${`, lexer.Push("Expr")}, {"Char", `[^$"\\]+`, nil}, }, "Expr": { lexer.Include("Root"), {`whitespace`, `\s+`, nil}, {`Oper`, `[-+/*%]`, nil}, {"Ident", `\w+`, nil}, {"ExprEnd", `}`, lexer.Pop()}, }, } func TestMarshalUnmarshal(t *testing.T) { data, err := json.MarshalIndent(interpolatedRules, "", " ") require.NoError(t, err) unmarshalledRules := lexer.Rules{} err = json.Unmarshal(data, &unmarshalledRules) require.NoError(t, err) require.Equal(t, interpolatedRules, unmarshalledRules) } func TestStatefulLexer(t *testing.T) { tests := []struct { name string rules lexer.Rules input string tokens []string err string buildErr string }{ {name: "InvalidPushTarget", buildErr: `lexer: invalid action for rule "foo": lexer: push to unknown state "Invalid"`, rules: lexer.Rules{"Root": {{`foo`, ``, lexer.Push("Invalid")}}}, }, {name: "BackrefNoGroups", input: `hello`, err: `1:1: lexer: rule "Backref": invalid backref expansion: "\\1": invalid group 1 from parent with 0 groups`, rules: lexer.Rules{"Root": {{"Backref", `\1`, nil}}}, }, {name: "BackrefInvalidGroups", input: `<"] roots: [0, 1]}, // {history: ["<"], roots: [2, 3]}, // ] // 1. 
groups = [ // {history: [">", "="], roots: [0]}, // {history: [">"], roots: [1]}, // {history: ["<", "="], roots: [2]}, // {history: ["<"], roots: [3]}, // ] func TestLookaheadWithConvergingTokens(t *testing.T) { type grammar struct { Left string `@Ident` Op string `[ @( ">" "=" | ">" | "<" "=" | "<" )` Next *grammar ` @@ ]` } p := mustTestParser[grammar](t, participle.UseLookahead(5)) _, err := p.ParseString("", "a >= b") require.NoError(t, err) } func TestIssue27(t *testing.T) { type grammar struct { Number int ` @(["-"] Int)` String string `| @String` } p := mustTestParser[grammar](t) actual, err := p.ParseString("", `- 100`) require.NoError(t, err) require.Equal(t, &grammar{Number: -100}, actual) actual, err = p.ParseString("", `100`) require.NoError(t, err) require.Equal(t, &grammar{Number: 100}, actual) } func TestLookaheadDisambiguateByType(t *testing.T) { type grammar struct { Int int ` @(["-"] Int)` Float float64 `| @(["-"] Float)` } p := mustTestParser[grammar](t, participle.UseLookahead(5)) actual, err := p.ParseString("", `- 100`) require.NoError(t, err) require.Equal(t, &grammar{Int: -100}, actual) actual, err = p.ParseString("", `- 100.5`) require.NoError(t, err) require.Equal(t, &grammar{Float: -100.5}, actual) } func TestShowNearestError(t *testing.T) { type grammar struct { A string ` @"a" @"b" @"c"` B string `| @"a" @"z"` } p := mustTestParser[grammar](t, participle.UseLookahead(10)) _, err := p.ParseString("", `a b d`) require.EqualError(t, err, `1:5: unexpected token "d" (expected "c")`) } func TestRewindDisjunction(t *testing.T) { type grammar struct { Function string ` @Ident "(" ")"` Ident string `| @Ident` } p := mustTestParser[grammar](t, participle.UseLookahead(2)) ast, err := p.ParseString("", `name`) require.NoError(t, err) require.Equal(t, &grammar{Ident: "name"}, ast) } func TestRewindOptional(t *testing.T) { type grammar struct { Var string ` [ "int" "int" ] @Ident` } p := mustTestParser[grammar](t, participle.UseLookahead(3)) ast, err := p.ParseString("", `one`) require.NoError(t, err) require.Equal(t, &grammar{Var: "one"}, ast) ast, err = p.ParseString("", `int int one`) require.NoError(t, err) require.Equal(t, &grammar{Var: "one"}, ast) } func TestRewindRepetition(t *testing.T) { type grammar struct { Ints []string `(@"int")*` Ident string `@Ident` } p := mustTestParser[grammar](t, participle.UseLookahead(3)) ast, err := p.ParseString("", `int int one`) require.NoError(t, err) require.Equal(t, &grammar{Ints: []string{"int", "int"}, Ident: "one"}, ast) ast, err = p.ParseString("", `int int one`) require.NoError(t, err) require.Equal(t, &grammar{Ints: []string{"int", "int"}, Ident: "one"}, ast) } golang-github-alecthomas-participle-v2-2.1.4/map.go000066400000000000000000000051521505300366400221530ustar00rootroot00000000000000package participle import ( "io" "strconv" "strings" "github.com/alecthomas/participle/v2/lexer" ) type mapperByToken struct { symbols []string mapper Mapper } // Mapper function for mutating tokens before being applied to the AST. type Mapper func(token lexer.Token) (lexer.Token, error) // Map is an Option that configures the Parser to apply a mapping function to each Token from the lexer. // // This can be useful to eg. upper-case all tokens of a certain type, or dequote strings. // // "symbols" specifies the token symbols that the Mapper will be applied to. If empty, all tokens will be mapped. 
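//
// For example, a sketch equivalent to the Upper option defined below:
//
//	upperIdents := Map(func(t lexer.Token) (lexer.Token, error) {
//		t.Value = strings.ToUpper(t.Value)
//		return t, nil
//	}, "Ident")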
func Map(mapper Mapper, symbols ...string) Option { return func(p *parserOptions) error { p.mappers = append(p.mappers, mapperByToken{ mapper: mapper, symbols: symbols, }) return nil } } // Unquote applies strconv.Unquote() to tokens of the given types. // // Tokens of type "String" will be unquoted if no other types are provided. func Unquote(types ...string) Option { if len(types) == 0 { types = []string{"String"} } return Map(func(t lexer.Token) (lexer.Token, error) { value, err := unquote(t.Value) if err != nil { return t, Errorf(t.Pos, "invalid quoted string %q: %s", t.Value, err.Error()) } t.Value = value return t, nil }, types...) } func unquote(s string) (string, error) { quote := s[0] s = s[1 : len(s)-1] out := "" for s != "" { value, _, tail, err := strconv.UnquoteChar(s, quote) if err != nil { return "", err } s = tail out += string(value) } return out, nil } // Upper is an Option that upper-cases all tokens of the given type. Useful for case normalisation. func Upper(types ...string) Option { return Map(func(token lexer.Token) (lexer.Token, error) { token.Value = strings.ToUpper(token.Value) return token, nil }, types...) } // Elide drops tokens of the specified types. func Elide(types ...string) Option { return func(p *parserOptions) error { p.elide = append(p.elide, types...) return nil } } // Apply a Mapping to all tokens coming out of a Lexer. type mappingLexerDef struct { l lexer.Definition mapper Mapper } var _ lexer.Definition = &mappingLexerDef{} func (m *mappingLexerDef) Symbols() map[string]lexer.TokenType { return m.l.Symbols() } func (m *mappingLexerDef) Lex(filename string, r io.Reader) (lexer.Lexer, error) { l, err := m.l.Lex(filename, r) if err != nil { return nil, err } return &mappingLexer{l, m.mapper}, nil } type mappingLexer struct { lexer.Lexer mapper Mapper } func (m *mappingLexer) Next() (lexer.Token, error) { t, err := m.Lexer.Next() if err != nil { return t, err } return m.mapper(t) } golang-github-alecthomas-participle-v2-2.1.4/map_test.go000066400000000000000000000036521505300366400232150ustar00rootroot00000000000000package participle_test import ( "strings" "testing" require "github.com/alecthomas/assert/v2" "github.com/alecthomas/participle/v2" "github.com/alecthomas/participle/v2/lexer" ) func TestUpper(t *testing.T) { type grammar struct { Text string `@Ident` } def := lexer.MustSimple([]lexer.SimpleRule{ {"Whitespace", `\s+`}, {"Ident", `\w+`}, }) parser := mustTestParser[grammar](t, participle.Lexer(def), participle.Upper("Ident")) actual, err := parser.Lex("", strings.NewReader("hello world")) require.NoError(t, err) expected := []lexer.Token{ {Type: -3, Value: "HELLO", Pos: lexer.Position{Filename: "", Offset: 0, Line: 1, Column: 1}}, {Type: -2, Value: " ", Pos: lexer.Position{Filename: "", Offset: 5, Line: 1, Column: 6}}, {Type: -3, Value: "WORLD", Pos: lexer.Position{Filename: "", Offset: 6, Line: 1, Column: 7}}, {Type: lexer.EOF, Value: "", Pos: lexer.Position{Filename: "", Offset: 11, Line: 1, Column: 12}}, } require.Equal(t, expected, actual) } func TestUnquote(t *testing.T) { type grammar struct { Text string `@Ident` } lex := lexer.MustSimple([]lexer.SimpleRule{ {"whitespace", `\s+`}, {"Ident", `\w+`}, {"String", `\"(?:[^\"]|\\.)*\"`}, {"RawString", "`[^`]*`"}, }) parser := mustTestParser[grammar](t, participle.Lexer(lex), participle.Unquote("String", "RawString")) actual, err := parser.Lex("", strings.NewReader("hello world \"quoted\\tstring\" `backtick quotes`")) require.NoError(t, err) expected := []lexer.Token{ {Type: -3, Value: 
"hello", Pos: lexer.Position{Line: 1, Column: 1}}, {Type: -3, Value: "world", Pos: lexer.Position{Offset: 6, Line: 1, Column: 7}}, {Type: -4, Value: "quoted\tstring", Pos: lexer.Position{Offset: 12, Line: 1, Column: 13}}, {Type: -5, Value: "backtick quotes", Pos: lexer.Position{Offset: 29, Line: 1, Column: 30}}, {Type: lexer.EOF, Value: "", Pos: lexer.Position{Offset: 46, Line: 1, Column: 47}}, } require.Equal(t, expected, actual) } golang-github-alecthomas-participle-v2-2.1.4/nodes.go000066400000000000000000000475321505300366400225160ustar00rootroot00000000000000package participle import ( "encoding" "errors" "fmt" "reflect" "strconv" "strings" "github.com/alecthomas/participle/v2/lexer" ) var ( // MaxIterations limits the number of elements capturable by {}. MaxIterations = 1000000 positionType = reflect.TypeOf(lexer.Position{}) tokenType = reflect.TypeOf(lexer.Token{}) tokensType = reflect.TypeOf([]lexer.Token{}) captureType = reflect.TypeOf((*Capture)(nil)).Elem() textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() parseableType = reflect.TypeOf((*Parseable)(nil)).Elem() // NextMatch should be returned by Parseable.Parse() method implementations to indicate // that the node did not match and that other matches should be attempted, if appropriate. NextMatch = errors.New("no match") // nolint: golint ) // A node in the grammar. type node interface { // Parse from scanner into value. // // Returned slice will be nil if the node does not match. Parse(ctx *parseContext, parent reflect.Value) ([]reflect.Value, error) // Return a decent string representation of the Node. fmt.Stringer fmt.GoStringer } func decorate(err *error, name func() string) { if *err == nil { return } if perr, ok := (*err).(Error); ok { *err = Errorf(perr.Position(), "%s: %s", name(), perr.Message()) } else { *err = &ParseError{Msg: fmt.Sprintf("%s: %s", name(), *err)} } } // A node that proxies to an implementation that implements the Parseable interface. 
type parseable struct { t reflect.Type } func (p *parseable) String() string { return ebnf(p) } func (p *parseable) GoString() string { return p.t.String() } func (p *parseable) Parse(ctx *parseContext, parent reflect.Value) (out []reflect.Value, err error) { defer ctx.printTrace(p)() rv := reflect.New(p.t) v := rv.Interface().(Parseable) err = v.Parse(&ctx.PeekingLexer) if err != nil { if err == NextMatch { return nil, nil } return nil, err } return []reflect.Value{rv.Elem()}, nil } // @@ (but for a custom production) type custom struct { typ reflect.Type parseFn reflect.Value } func (c *custom) String() string { return ebnf(c) } func (c *custom) GoString() string { return c.typ.Name() } func (c *custom) Parse(ctx *parseContext, parent reflect.Value) (out []reflect.Value, err error) { defer ctx.printTrace(c)() results := c.parseFn.Call([]reflect.Value{reflect.ValueOf(&ctx.PeekingLexer)}) if err, _ := results[1].Interface().(error); err != nil { if err == NextMatch { return nil, nil } return nil, err } return []reflect.Value{results[0]}, nil } // @@ (for a union) type union struct { unionDef disjunction disjunction } func (u *union) String() string { return ebnf(u) } func (u *union) GoString() string { return u.typ.Name() } func (u *union) Parse(ctx *parseContext, parent reflect.Value) (out []reflect.Value, err error) { defer ctx.printTrace(u)() vals, err := u.disjunction.Parse(ctx, parent) if err != nil { return nil, err } for i := range vals { vals[i] = maybeRef(u.members[i], vals[i]).Convert(u.typ) } return vals, nil } // @@ type strct struct { typ reflect.Type expr node tokensFieldIndex []int posFieldIndex []int endPosFieldIndex []int usages int } func newStrct(typ reflect.Type) *strct { s := &strct{ typ: typ, usages: 1, } field, ok := typ.FieldByName("Pos") if ok && positionType.ConvertibleTo(field.Type) { s.posFieldIndex = field.Index } field, ok = typ.FieldByName("EndPos") if ok && positionType.ConvertibleTo(field.Type) { s.endPosFieldIndex = field.Index } field, ok = typ.FieldByName("Tokens") if ok && field.Type == tokensType { s.tokensFieldIndex = field.Index } return s } func (s *strct) String() string { return ebnf(s) } func (s *strct) GoString() string { return s.typ.Name() } func (s *strct) Parse(ctx *parseContext, parent reflect.Value) (out []reflect.Value, err error) { defer ctx.printTrace(s)() sv := reflect.New(s.typ).Elem() start := ctx.RawCursor() t := ctx.Peek() s.maybeInjectStartToken(t, sv) if out, err = s.expr.Parse(ctx, sv); err != nil { _ = ctx.Apply() // Best effort to give partial AST. 
ctx.MaybeUpdateError(err) return []reflect.Value{sv}, err } else if out == nil { return nil, nil } end := ctx.RawCursor() t = ctx.RawPeek() s.maybeInjectEndToken(t, sv) s.maybeInjectTokens(ctx.Range(start, end), sv) return []reflect.Value{sv}, ctx.Apply() } func (s *strct) maybeInjectStartToken(token *lexer.Token, v reflect.Value) { if s.posFieldIndex == nil { return } f := v.FieldByIndex(s.posFieldIndex) f.Set(reflect.ValueOf(token.Pos).Convert(f.Type())) } func (s *strct) maybeInjectEndToken(token *lexer.Token, v reflect.Value) { if s.endPosFieldIndex == nil { return } f := v.FieldByIndex(s.endPosFieldIndex) f.Set(reflect.ValueOf(token.Pos).Convert(f.Type())) } func (s *strct) maybeInjectTokens(tokens []lexer.Token, v reflect.Value) { if s.tokensFieldIndex == nil { return } v.FieldByIndex(s.tokensFieldIndex).Set(reflect.ValueOf(tokens)) } type groupMatchMode int func (g groupMatchMode) String() string { switch g { case groupMatchOnce: return "n" case groupMatchZeroOrOne: return "n?" case groupMatchZeroOrMore: return "n*" case groupMatchOneOrMore: return "n+" case groupMatchNonEmpty: return "n!" } panic("??") } const ( groupMatchOnce groupMatchMode = iota groupMatchZeroOrOne = iota groupMatchZeroOrMore = iota groupMatchOneOrMore = iota groupMatchNonEmpty = iota ) // ( <expr> ) - match once // ( <expr> )* - match zero or more times // ( <expr> )+ - match one or more times // ( <expr> )? - match zero or once // ( <expr> )! - must be a non-empty match // // The additional modifier "!" forces the content of the group to be non-empty if it does match. type group struct { expr node mode groupMatchMode } func (g *group) String() string { return ebnf(g) } func (g *group) GoString() string { return fmt.Sprintf("group{%s}", g.mode) } func (g *group) Parse(ctx *parseContext, parent reflect.Value) (out []reflect.Value, err error) { defer ctx.printTrace(g)() // Configure min/max matches. min := 1 max := 1 switch g.mode { case groupMatchNonEmpty: out, err = g.expr.Parse(ctx, parent) if err != nil { return out, err } if len(out) == 0 { t := ctx.Peek() return out, Errorf(t.Pos, "sub-expression %s cannot be empty", g) } return out, nil case groupMatchOnce: return g.expr.Parse(ctx, parent) case groupMatchZeroOrOne: min = 0 case groupMatchZeroOrMore: min = 0 max = MaxIterations case groupMatchOneOrMore: min = 1 max = MaxIterations } matches := 0 for ; matches < max; matches++ { branch := ctx.Branch() v, err := g.expr.Parse(branch, parent) if err != nil { ctx.MaybeUpdateError(err) // Optional part failed to match. if ctx.Stop(err, branch) { out = append(out, v...) // Try to return as much of the parse tree as possible return out, err } break } out = append(out, v...) ctx.Accept(branch) if v == nil { break } } // fmt.Printf("%d < %d < %d: out == nil? %v\n", min, matches, max, out == nil) t := ctx.Peek() if matches >= MaxIterations { return nil, Errorf(t.Pos, "too many iterations of %s (> %d)", g, MaxIterations) } // avoid returning errors in parent nodes if the group is optional if matches > 0 && matches < min { return out, Errorf(t.Pos, "sub-expression %s must match at least once", g) } // The idea here is that something like "a"? is a successful match and that parsing should proceed. if min == 0 && out == nil { out = []reflect.Value{} } return out, nil } // (?= <expr> ) for positive lookahead, (?! <expr> 
) for negative lookahead; neither consumes input type lookaheadGroup struct { expr node negative bool } func (l *lookaheadGroup) String() string { return ebnf(l) } func (l *lookaheadGroup) GoString() string { return "lookaheadGroup{}" } func (l *lookaheadGroup) Parse(ctx *parseContext, parent reflect.Value) (out []reflect.Value, err error) { defer ctx.printTrace(l)() // Create a branch to avoid advancing the parser as any match will be discarded branch := ctx.Branch() out, err = l.expr.Parse(branch, parent) matchedLookahead := err == nil && out != nil expectingMatch := !l.negative if matchedLookahead != expectingMatch { return nil, &UnexpectedTokenError{Unexpected: *ctx.Peek()} } return []reflect.Value{}, nil // Empty match slice means a match, unlike nil } // <expr> {"|" <expr>} type disjunction struct { nodes []node } func (d *disjunction) String() string { return ebnf(d) } func (d *disjunction) GoString() string { return "disjunction{}" } func (d *disjunction) Parse(ctx *parseContext, parent reflect.Value) (out []reflect.Value, err error) { defer ctx.printTrace(d)() var ( deepestError = 0 firstError error firstValues []reflect.Value ) for _, a := range d.nodes { branch := ctx.Branch() if value, err := a.Parse(branch, parent); err != nil { // If this branch progressed too far and still didn't match, error out. if ctx.Stop(err, branch) { return value, err } // Show the closest error returned. The idea here is that the further the parser progresses // without error, the more difficult it is to trace the error back to its root. if branch.Cursor() >= deepestError { firstError = err firstValues = value deepestError = branch.Cursor() } } else if value != nil { bt := branch.RawPeek() ct := ctx.RawPeek() if bt == ct && bt.Type != lexer.EOF { panic(Errorf(bt.Pos, "branch %s was accepted but did not progress the lexer at %s (%q)", a, bt.Pos, bt.Value)) } ctx.Accept(branch) return value, nil } } if firstError != nil { ctx.MaybeUpdateError(firstError) return firstValues, firstError } return nil, nil } // <expr> <expr> ... type sequence struct { head bool // True if this is the head node. node node next *sequence } func (s *sequence) String() string { return ebnf(s) } func (s *sequence) GoString() string { return "sequence{}" } func (s *sequence) Parse(ctx *parseContext, parent reflect.Value) (out []reflect.Value, err error) { defer ctx.printTrace(s)() for n := s; n != nil; n = n.next { child, err := n.node.Parse(ctx, parent) out = append(out, child...) if err != nil { return out, err } if child == nil { // Early exit if first value doesn't match, otherwise all values must match. if n == s { return nil, nil } token := ctx.Peek() return out, &UnexpectedTokenError{Unexpected: *token, expectNode: n} } // Special-case for when children return an empty match. // Appending an empty, non-nil slice to a nil slice returns a nil slice. 
// https://go.dev/play/p/lV1Xk-IP6Ta if out == nil { out = []reflect.Value{} } } return out, nil } // @ type capture struct { field structLexerField node node } func (c *capture) String() string { return ebnf(c) } func (c *capture) GoString() string { return "capture{}" } func (c *capture) Parse(ctx *parseContext, parent reflect.Value) (out []reflect.Value, err error) { defer ctx.printTrace(c)() start := ctx.RawCursor() v, err := c.node.Parse(ctx, parent) if v != nil { ctx.Defer(ctx.Range(start, ctx.RawCursor()), parent, c.field, v) } if err != nil { return []reflect.Value{parent}, err } if v == nil { return nil, nil } return []reflect.Value{parent}, nil } // - named lexer token reference type reference struct { typ lexer.TokenType identifier string // Used for informational purposes. } func (r *reference) String() string { return ebnf(r) } func (r *reference) GoString() string { return fmt.Sprintf("reference{%s}", r.identifier) } func (r *reference) Parse(ctx *parseContext, parent reflect.Value) (out []reflect.Value, err error) { defer ctx.printTrace(r)() token, cursor := ctx.PeekAny(func(t lexer.Token) bool { return t.Type == r.typ }) if token.Type != r.typ { return nil, nil } ctx.FastForward(cursor) return []reflect.Value{reflect.ValueOf(token.Value)}, nil } // Match a token literal exactly "..."[:]. type literal struct { s string t lexer.TokenType tt string // Used for display purposes - symbolic name of t. } func (l *literal) String() string { return ebnf(l) } func (l *literal) GoString() string { return fmt.Sprintf("literal{%q, %q}", l.s, l.tt) } func (l *literal) Parse(ctx *parseContext, parent reflect.Value) (out []reflect.Value, err error) { defer ctx.printTrace(l)() match := func(t lexer.Token) bool { var equal bool if ctx.caseInsensitive[t.Type] { equal = l.s == "" || strings.EqualFold(t.Value, l.s) } else { equal = l.s == "" || t.Value == l.s } return (l.t == lexer.EOF || l.t == t.Type) && equal } token, cursor := ctx.PeekAny(match) if match(token) { ctx.FastForward(cursor) return []reflect.Value{reflect.ValueOf(token.Value)}, nil } return nil, nil } type negation struct { node node } func (n *negation) String() string { return ebnf(n) } func (n *negation) GoString() string { return "negation{}" } func (n *negation) Parse(ctx *parseContext, parent reflect.Value) (out []reflect.Value, err error) { defer ctx.printTrace(n)() // Create a branch to avoid advancing the parser, but call neither Stop nor Accept on it // since we will discard a match. branch := ctx.Branch() notEOF := ctx.Peek() if notEOF.EOF() { // EOF cannot match a negation, which expects something return nil, nil } out, err = n.node.Parse(branch, parent) if out != nil && err == nil { // out being non-nil means that what we don't want is actually here, so we report nomatch return nil, &UnexpectedTokenError{Unexpected: *notEOF} } // Just give the next token next := ctx.Next() return []reflect.Value{reflect.ValueOf(next.Value)}, nil } // Attempt to transform values to given type. // // This will dereference pointers, and attempt to parse strings into integer values, floats, etc. func conform(t reflect.Type, values []reflect.Value) (out []reflect.Value, err error) { for _, v := range values { for t != v.Type() && t.Kind() == reflect.Ptr && v.Kind() != reflect.Ptr { // This can occur during partial failure. if !v.CanAddr() { return } v = v.Addr() } // Already of the right kind, don't bother converting. 
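// For example, a target of "type Ident string" (an illustrative named type)
// has the same Kind as a captured string value but a distinct reflect.Type,
// so the Convert below is still required before the value can be used.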
if v.Kind() == t.Kind() { if v.Type() != t { v = v.Convert(t) } out = append(out, v) continue } kind := t.Kind() switch kind { // nolint: exhaustive case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: n, err := strconv.ParseInt(v.String(), 0, sizeOfKind(kind)) if err != nil { return nil, err } v = reflect.New(t).Elem() v.SetInt(n) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: n, err := strconv.ParseUint(v.String(), 0, sizeOfKind(kind)) if err != nil { return nil, err } v = reflect.New(t).Elem() v.SetUint(n) case reflect.Bool: v = reflect.ValueOf(true) case reflect.Float32, reflect.Float64: n, err := strconv.ParseFloat(v.String(), sizeOfKind(kind)) if err != nil { return nil, err } v = reflect.New(t).Elem() v.SetFloat(n) } out = append(out, v) } return out, nil } func sizeOfKind(kind reflect.Kind) int { switch kind { // nolint: exhaustive case reflect.Int8, reflect.Uint8: return 8 case reflect.Int16, reflect.Uint16: return 16 case reflect.Int32, reflect.Uint32, reflect.Float32: return 32 case reflect.Int64, reflect.Uint64, reflect.Float64: return 64 case reflect.Int, reflect.Uint: return strconv.IntSize } panic("unsupported kind " + kind.String()) } func maybeRef(tmpl reflect.Type, strct reflect.Value) reflect.Value { if strct.Type() == tmpl { return strct } if tmpl.Kind() == reflect.Ptr { if strct.CanAddr() { return strct.Addr() } ptr := reflect.New(tmpl) ptr.Set(strct) return ptr } return strct } // Set field. // // If field is a pointer the pointer will be set to the value. If field is a string, value will be // appended. If field is a slice, value will be appended to slice. // // For all other types, an attempt will be made to convert the string to the corresponding // type (int, float32, etc.). func setField(tokens []lexer.Token, strct reflect.Value, field structLexerField, fieldValue []reflect.Value) (err error) { // nolint: gocognit f := strct.FieldByIndex(field.Index) // Any kind of pointer, hydrate it first. 
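// e.g. a grammar field declared as *string or *SubExpr (illustrative names)
// is allocated below before anything is captured into it.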
if f.Kind() == reflect.Ptr { if f.IsNil() { fv := reflect.New(f.Type().Elem()).Elem() f.Set(fv.Addr()) f = fv } else { f = f.Elem() } } var pos lexer.Position if len(tokens) > 0 { pos = tokens[0].Pos } if f.Type() == tokenType { f.Set(reflect.ValueOf(tokens[0])) return nil } if f.Type() == tokensType { f.Set(reflect.ValueOf(tokens)) return nil } if f.CanAddr() { if d, ok := f.Addr().Interface().(Capture); ok { ifv := make([]string, 0, len(fieldValue)) for _, v := range fieldValue { ifv = append(ifv, v.Interface().(string)) } err = d.Capture(ifv) if err != nil { return Wrapf(pos, err, "failed to capture") } return nil } else if d, ok := f.Addr().Interface().(encoding.TextUnmarshaler); ok { for _, v := range fieldValue { if err := d.UnmarshalText([]byte(v.Interface().(string))); err != nil { return Wrapf(pos, err, "failed to unmarshal text") } } return nil } } if f.Kind() == reflect.Slice { sliceElemType := f.Type().Elem() if sliceElemType.Implements(captureType) || reflect.PtrTo(sliceElemType).Implements(captureType) { if sliceElemType.Kind() == reflect.Ptr { sliceElemType = sliceElemType.Elem() } for _, v := range fieldValue { d := reflect.New(sliceElemType).Interface().(Capture) if err := d.Capture([]string{v.Interface().(string)}); err != nil { return Wrapf(pos, err, "failed to capture") } eltValue := reflect.ValueOf(d) if f.Type().Elem().Kind() != reflect.Ptr { eltValue = eltValue.Elem() } f.Set(reflect.Append(f, eltValue)) } } else { fieldValue, err = conform(sliceElemType, fieldValue) if err != nil { return Wrapf(pos, err, "failed to conform") } f.Set(reflect.Append(f, fieldValue...)) } return nil } // Strings concatenate all captured tokens. if f.Kind() == reflect.String { fieldValue, err = conform(f.Type(), fieldValue) if err != nil { return Wrapf(pos, err, "failed to conform") } if len(fieldValue) == 0 { return nil } accumulated := f.String() for _, v := range fieldValue { accumulated += v.String() } f.SetString(accumulated) return nil } // Coalesce multiple tokens into one. This allows eg. ["-", "10"] to be captured as separate tokens but // parsed as a single string "-10". if len(fieldValue) > 1 { out := []string{} for _, v := range fieldValue { out = append(out, v.String()) } fieldValue = []reflect.Value{reflect.ValueOf(strings.Join(out, ""))} } fieldValue, err = conform(f.Type(), fieldValue) if err != nil { return Wrapf(pos, err, "failed to conform") } if len(fieldValue) == 0 { return nil // Nothing to capture, can happen when trying to get a partial parse tree } fv := fieldValue[0] switch f.Kind() { // nolint: exhaustive // Numeric types will increment if the token can not be coerced. 
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: if fv.Type() != f.Type() { f.SetInt(f.Int() + 1) } else { f.Set(fv) } case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: if fv.Type() != f.Type() { f.SetUint(f.Uint() + 1) } else { f.Set(fv) } case reflect.Float32, reflect.Float64: if fv.Type() != f.Type() { f.SetFloat(f.Float() + 1) } else { f.Set(fv) } case reflect.Bool, reflect.Struct, reflect.Interface: if f.Kind() == reflect.Bool && fv.Kind() == reflect.Bool { f.SetBool(fv.Bool()) break } if fv.Type() != f.Type() { return Errorf(pos, "value %q is not correct type %s", fv, f.Type()) } f.Set(fv) default: return Errorf(pos, "unsupported field type %s for field %s", f.Type(), field.Name) } return nil } golang-github-alecthomas-participle-v2-2.1.4/options.go000066400000000000000000000111171505300366400230670ustar00rootroot00000000000000package participle import ( "fmt" "io" "reflect" "github.com/alecthomas/participle/v2/lexer" ) // MaxLookahead can be used with UseLookahead to get pseudo-infinite // lookahead without the risk of pathological cases causing a stack // overflow. const MaxLookahead = 99999 // An Option to modify the behaviour of the Parser. type Option func(p *parserOptions) error // Lexer is an Option that sets the lexer to use with the given grammar. func Lexer(def lexer.Definition) Option { return func(p *parserOptions) error { p.lex = def return nil } } // UseLookahead allows branch lookahead up to "n" tokens. // // If parsing cannot be disambiguated before "n" tokens of lookahead, parsing will fail. // // Note that increasing lookahead has a minor performance impact, but also // reduces the accuracy of error reporting. // // If "n" is negative, it will be treated as "infinite" lookahead. // This can have a large impact on performance, and does not provide any // protection against stack overflow during parsing. // In most cases, using MaxLookahead will achieve the same results in practice, // but with a concrete upper bound to prevent pathological behavior in the parser. // Using infinite lookahead can be useful for testing, or for parsing especially // ambiguous grammars. Use at your own risk! func UseLookahead(n int) Option { return func(p *parserOptions) error { p.useLookahead = n return nil } } // CaseInsensitive allows the specified token types to be matched case-insensitively. // // Note that the lexer itself will also have to be case-insensitive; this option // just controls whether literals in the grammar are matched case insensitively. func CaseInsensitive(tokens ...string) Option { return func(p *parserOptions) error { for _, token := range tokens { p.caseInsensitive[token] = true } return nil } } // ParseTypeWith associates a custom parsing function with some interface type T. // When the parser encounters a value of type T, it will use the given parse function to // parse a value from the input. // // The parse function may return anything it wishes as long as that value satisfies the interface T. // However, only a single function can be defined for any type T. // If you want to have multiple parse functions returning types that satisfy the same interface, you'll // need to define new wrapper types for each one. // // This can be useful if you want to parse a DSL within the larger grammar, or if you want // to implement an optimized parsing scheme for some portion of the grammar. 
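//
// A minimal sketch, assuming a user-defined interface and member type
// (Grammar, Value and Number are illustrative, not part of this package):
//
//	type Value interface{ isValue() }
//	type Number float64
//	func (Number) isValue() {}
//
//	parser := participle.MustBuild[Grammar](
//		participle.ParseTypeWith(func(lex *lexer.PeekingLexer) (Value, error) {
//			n, err := strconv.ParseFloat(lex.Peek().Value, 64)
//			if err != nil {
//				return nil, participle.NextMatch
//			}
//			lex.Next()
//			return Number(n), nil
//		}),
//	)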
func ParseTypeWith[T any](parseFn func(*lexer.PeekingLexer) (T, error)) Option { return func(p *parserOptions) error { parseFnVal := reflect.ValueOf(parseFn) parseFnType := parseFnVal.Type() if parseFnType.Out(0).Kind() != reflect.Interface { return fmt.Errorf("ParseTypeWith: T must be an interface type (got %s)", parseFnType.Out(0)) } prodType := parseFnType.Out(0) p.customDefs = append(p.customDefs, customDef{prodType, parseFnVal}) return nil } } // Union associates several member productions with some interface type T. // Given members X, Y, Z, and W for a union type U, then the EBNF rule is: // // U = X | Y | Z | W . // // When the parser encounters a field of type T, it will attempt to parse each member // in sequence and take the first match. Because of this, the order in which the // members are defined is important. You must be careful to order your members appropriately. // // An example of a bad parse that can happen if members are out of order: // // If the first member matches A, and the second member matches A B, // and the source string is "AB", then the parser will only match A, and will not // try to parse the second member at all. func Union[T any](members ...T) Option { return func(p *parserOptions) error { var t T unionType := reflect.TypeOf(&t).Elem() if unionType.Kind() != reflect.Interface { return fmt.Errorf("union: union type must be an interface (got %s)", unionType) } memberTypes := make([]reflect.Type, 0, len(members)) for _, m := range members { memberTypes = append(memberTypes, reflect.TypeOf(m)) } p.unionDefs = append(p.unionDefs, unionDef{unionType, memberTypes}) return nil } } // ParseOption modifies how an individual parse is applied. type ParseOption func(p *parseContext) // Trace the parse to "w". func Trace(w io.Writer) ParseOption { return func(p *parseContext) { p.trace = w } } // AllowTrailing tokens without erroring. // // That is, do not error if a full parse completes but additional tokens remain. func AllowTrailing(ok bool) ParseOption { return func(p *parseContext) { p.allowTrailing = ok } } golang-github-alecthomas-participle-v2-2.1.4/parser.go000066400000000000000000000212651505300366400226750ustar00rootroot00000000000000package participle import ( "bytes" "fmt" "io" "reflect" "strings" "github.com/alecthomas/participle/v2/lexer" ) type unionDef struct { typ reflect.Type members []reflect.Type } type customDef struct { typ reflect.Type parseFn reflect.Value } type parserOptions struct { lex lexer.Definition rootType reflect.Type typeNodes map[reflect.Type]node useLookahead int caseInsensitive map[string]bool caseInsensitiveTokens map[lexer.TokenType]bool mappers []mapperByToken unionDefs []unionDef customDefs []customDef elide []string } // A Parser for a particular grammar and lexer. type Parser[G any] struct { parserOptions } // ParserForProduction returns a new parser for the given production in grammar G. func ParserForProduction[P, G any](parser *Parser[G]) (*Parser[P], error) { t := reflect.TypeOf(*new(P)) _, ok := parser.typeNodes[t] if !ok { return nil, fmt.Errorf("parser does not contain a production of type %s", t) } return (*Parser[P])(parser), nil } // MustBuild calls Build[G](options...) and panics if an error occurs. func MustBuild[G any](options ...Option) *Parser[G] { parser, err := Build[G](options...) if err != nil { panic(err) } return parser } // Build constructs a parser for the given grammar. // // If "Lexer()" is not provided as an option, a default lexer based on text/scanner will be used. 
This scans typical Go- // like tokens. // // See documentation for details. func Build[G any](options ...Option) (parser *Parser[G], err error) { // Configure Parser[G] struct with defaults + options. p := &Parser[G]{ parserOptions: parserOptions{ lex: lexer.TextScannerLexer, caseInsensitive: map[string]bool{}, useLookahead: 1, }, } for _, option := range options { if err = option(&p.parserOptions); err != nil { return nil, err } } symbols := p.lex.Symbols() if len(p.mappers) > 0 { mappers := map[lexer.TokenType][]Mapper{} for _, mapper := range p.mappers { if len(mapper.symbols) == 0 { mappers[lexer.EOF] = append(mappers[lexer.EOF], mapper.mapper) } else { for _, symbol := range mapper.symbols { if rn, ok := symbols[symbol]; !ok { return nil, fmt.Errorf("mapper %#v uses unknown token %q", mapper, symbol) } else { // nolint: golint mappers[rn] = append(mappers[rn], mapper.mapper) } } } } p.lex = &mappingLexerDef{p.lex, func(t lexer.Token) (lexer.Token, error) { combined := make([]Mapper, 0, len(mappers[t.Type])+len(mappers[lexer.EOF])) combined = append(combined, mappers[lexer.EOF]...) combined = append(combined, mappers[t.Type]...) var err error for _, m := range combined { t, err = m(t) if err != nil { return t, err } } return t, nil }} } context := newGeneratorContext(p.lex) if err := context.addCustomDefs(p.customDefs); err != nil { return nil, err } if err := context.addUnionDefs(p.unionDefs); err != nil { return nil, err } var grammar G v := reflect.ValueOf(&grammar) if v.Kind() == reflect.Interface { v = v.Elem() } p.rootType = v.Type() rootNode, err := context.parseType(p.rootType) if err != nil { return nil, err } if err := validate(rootNode); err != nil { return nil, err } p.typeNodes = context.typeNodes p.typeNodes[p.rootType] = rootNode p.setCaseInsensitiveTokens() return p, nil } // Lexer returns the parser's builtin lexer. func (p *Parser[G]) Lexer() lexer.Definition { return p.lex } // Lex uses the parser's lexer to tokenise input. // Parameter filename is used as an opaque prefix in error messages. func (p *Parser[G]) Lex(filename string, r io.Reader) ([]lexer.Token, error) { lex, err := p.lex.Lex(filename, r) if err != nil { return nil, err } tokens, err := lexer.ConsumeAll(lex) return tokens, err } // ParseFromLexer into grammar v which must be of the same type as the grammar passed to // Build(). // // This may return a Error. func (p *Parser[G]) ParseFromLexer(lex *lexer.PeekingLexer, options ...ParseOption) (*G, error) { v := new(G) rv := reflect.ValueOf(v) parseNode, err := p.parseNodeFor(rv) if err != nil { return nil, err } ctx := newParseContext(lex, p.useLookahead, p.caseInsensitiveTokens) defer func() { *lex = ctx.PeekingLexer }() for _, option := range options { option(&ctx) } // If the grammar implements Parseable, use it. if parseable, ok := any(v).(Parseable); ok { return v, p.rootParseable(&ctx, parseable) } return v, p.parseOne(&ctx, parseNode, rv) } func (p *Parser[G]) setCaseInsensitiveTokens() { p.caseInsensitiveTokens = map[lexer.TokenType]bool{} for sym, tt := range p.lex.Symbols() { if p.caseInsensitive[sym] { p.caseInsensitiveTokens[tt] = true } } } func (p *Parser[G]) parse(lex lexer.Lexer, options ...ParseOption) (v *G, err error) { peeker, err := lexer.Upgrade(lex, p.getElidedTypes()...) if err != nil { return nil, err } return p.ParseFromLexer(peeker, options...) } // Parse from r into grammar v which must be of the same type as the grammar passed to // Build(). Parameter filename is used as an opaque prefix in error messages. 
// // This may return an Error. func (p *Parser[G]) Parse(filename string, r io.Reader, options ...ParseOption) (v *G, err error) { if filename == "" { filename = lexer.NameOfReader(r) } lex, err := p.lex.Lex(filename, r) if err != nil { return nil, err } return p.parse(lex, options...) } // ParseString from s into grammar v which must be of the same type as the grammar passed to // Build(). Parameter filename is used as an opaque prefix in error messages. // // This may return an Error. func (p *Parser[G]) ParseString(filename string, s string, options ...ParseOption) (v *G, err error) { var lex lexer.Lexer if sl, ok := p.lex.(lexer.StringDefinition); ok { lex, err = sl.LexString(filename, s) } else { lex, err = p.lex.Lex(filename, strings.NewReader(s)) } if err != nil { return nil, err } return p.parse(lex, options...) } // ParseBytes from b into grammar v which must be of the same type as the grammar passed to // Build(). Parameter filename is used as an opaque prefix in error messages. // // This may return an Error. func (p *Parser[G]) ParseBytes(filename string, b []byte, options ...ParseOption) (v *G, err error) { var lex lexer.Lexer if sl, ok := p.lex.(lexer.BytesDefinition); ok { lex, err = sl.LexBytes(filename, b) } else { lex, err = p.lex.Lex(filename, bytes.NewReader(b)) } if err != nil { return nil, err } return p.parse(lex, options...) } func (p *Parser[G]) parseOne(ctx *parseContext, parseNode node, rv reflect.Value) error { err := p.parseInto(ctx, parseNode, rv) if err != nil { return err } token := ctx.Peek() if !token.EOF() && !ctx.allowTrailing { return ctx.DeepestError(&UnexpectedTokenError{Unexpected: *token}) } return nil } func (p *Parser[G]) parseInto(ctx *parseContext, parseNode node, rv reflect.Value) error { if rv.IsNil() { return fmt.Errorf("target must be a non-nil pointer to a struct or interface, but is a nil %s", rv.Type()) } pv, err := p.typeNodes[rv.Type().Elem()].Parse(ctx, rv.Elem()) if len(pv) > 0 && pv[0].Type() == rv.Elem().Type() { rv.Elem().Set(reflect.Indirect(pv[0])) } if err != nil { return err } if pv == nil { token := ctx.Peek() return ctx.DeepestError(&UnexpectedTokenError{Unexpected: *token}) } return nil } func (p *Parser[G]) rootParseable(ctx *parseContext, parseable Parseable) error { if err := parseable.Parse(&ctx.PeekingLexer); err != nil { if err == NextMatch { err = &UnexpectedTokenError{Unexpected: *ctx.Peek()} } else { err = &ParseError{Msg: err.Error(), Pos: ctx.Peek().Pos} } return ctx.DeepestError(err) } peek := ctx.Peek() if !peek.EOF() && !ctx.allowTrailing { return ctx.DeepestError(&UnexpectedTokenError{Unexpected: *peek}) } return nil } func (p *Parser[G]) getElidedTypes() []lexer.TokenType { symbols := p.lex.Symbols() elideTypes := make([]lexer.TokenType, 0, len(p.elide)) for _, elide := range p.elide { rn, ok := symbols[elide] if !ok { panic(fmt.Errorf("Elide() uses unknown token %q", elide)) } elideTypes = append(elideTypes, rn) } return elideTypes } func (p *Parser[G]) parseNodeFor(v reflect.Value) (node, error) { t := v.Type() if t.Kind() == reflect.Interface { t = t.Elem() } if t.Kind() != reflect.Ptr || (t.Elem().Kind() != reflect.Struct && t.Elem().Kind() != reflect.Interface) { return nil, fmt.Errorf("expected a pointer to a struct or interface, but got %s", t) } parseNode := p.typeNodes[t] if parseNode == nil { t = t.Elem() parseNode = p.typeNodes[t] } if parseNode == nil { return nil, fmt.Errorf("parser does not know how to parse values of type %s", t) } return parseNode, nil } 
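// A minimal end-to-end sketch of the API above (Grammar and the input are
// illustrative only):
//
//	type Grammar struct {
//		Name string `@Ident`
//	}
//
//	parser := participle.MustBuild[Grammar]()
//	g, err := parser.ParseString("file.txt", "hello")
//	// On success g.Name == "hello"; on failure err may be a participle.Error.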
golang-github-alecthomas-participle-v2-2.1.4/parser_test.go000066400000000000000000001324301505300366400237310ustar00rootroot00000000000000package participle_test import ( "errors" "fmt" "math" "net" "reflect" "strconv" "strings" "testing" "text/scanner" "github.com/alecthomas/assert/v2" "github.com/alecthomas/participle/v2" "github.com/alecthomas/participle/v2/lexer" ) func TestProductionCapture(t *testing.T) { type testCapture struct { A string `@Test` } _, err := participle.Build[testCapture]() assert.Error(t, err) } func TestTermCapture(t *testing.T) { type grammar struct { A string `@"."*` } parser := mustTestParser[grammar](t) expected := &grammar{"..."} actual, err := parser.ParseString("", "...") assert.NoError(t, err) assert.Equal(t, expected, actual) } func TestParseScalar(t *testing.T) { type grammar struct { A string `@"one"` } parser := mustTestParser[grammar](t) actual, err := parser.ParseString("", "one") assert.NoError(t, err) assert.Equal(t, &grammar{"one"}, actual) } func TestParseGroup(t *testing.T) { type grammar struct { A string `@("one" | "two")` } parser := mustTestParser[grammar](t) actual, err := parser.ParseString("", "one") assert.NoError(t, err) assert.Equal(t, &grammar{"one"}, actual) actual, err = parser.ParseString("", "two") assert.NoError(t, err) assert.Equal(t, &grammar{"two"}, actual) } func TestParseAlternative(t *testing.T) { type grammar struct { A string `@"one" |` B string `@"two"` } parser := mustTestParser[grammar](t) actual, err := parser.ParseString("", "one") assert.NoError(t, err) assert.Equal(t, &grammar{A: "one"}, actual) actual, err = parser.ParseString("", "two") assert.NoError(t, err) assert.Equal(t, &grammar{B: "two"}, actual) } func TestParseSequence(t *testing.T) { type grammar struct { A string `@"one"` B string `@"two"` C string `@"three"` } parser := mustTestParser[grammar](t) expected := &grammar{"one", "two", "three"} actual, err := parser.ParseString("", "one two three") assert.NoError(t, err) assert.Equal(t, expected, actual) expected = &grammar{} actual, err = parser.ParseString("", "moo") assert.Error(t, err) assert.Equal(t, expected, actual) } func TestNested(t *testing.T) { type nestedInner struct { B string `@"one"` C string `@"two"` } type testNested struct { A *nestedInner `@@` } parser := mustTestParser[testNested](t) expected := &testNested{A: &nestedInner{B: "one", C: "two"}} actual, err := parser.ParseString("", "one two") assert.NoError(t, err) assert.Equal(t, expected, actual) } func TestAccumulateNested(t *testing.T) { type nestedInner struct { B string `@"one"` C string `@"two"` } type testAccumulateNested struct { A []*nestedInner `@@+` } parser := mustTestParser[testAccumulateNested](t) expected := &testAccumulateNested{A: []*nestedInner{{B: "one", C: "two"}, {B: "one", C: "two"}}} actual, err := parser.ParseString("", "one two one two") assert.NoError(t, err) assert.Equal(t, expected, actual) } func TestRepetitionNoMatch(t *testing.T) { type grammar struct { A []string `@"."*` } parser := mustTestParser[grammar](t) expected := &grammar{} actual, err := parser.ParseString("", ``) assert.NoError(t, err) assert.Equal(t, expected, actual) } func TestRepetition(t *testing.T) { type grammar struct { A []string `@"."*` } parser := mustTestParser[grammar](t) expected := &grammar{A: []string{".", ".", "."}} actual, err := parser.ParseString("", `...`) assert.NoError(t, err) assert.Equal(t, expected, actual) } func TestRepetitionAcrossFields(t *testing.T) { type testRepetition struct { A []string `@"."*` B *string `(@"b" 
|` C *string ` @"c")` } parser := mustTestParser[testRepetition](t) b := "b" c := "c" expected := &testRepetition{ A: []string{".", ".", "."}, B: &b, } actual, err := parser.ParseString("", "...b") assert.NoError(t, err) assert.Equal(t, expected, actual) expected = &testRepetition{ A: []string{".", ".", "."}, C: &c, } actual, err = parser.ParseString("", "...c") assert.NoError(t, err) assert.Equal(t, expected, actual) expected = &testRepetition{ B: &b, } actual, err = parser.ParseString("", "b") assert.NoError(t, err) assert.Equal(t, expected, actual) } func TestAccumulateString(t *testing.T) { type customString string type testAccumulateString struct { A customString `@"."+` } parser := mustTestParser[testAccumulateString](t) expected := &testAccumulateString{ A: "...", } actual, err := parser.ParseString("", "...") assert.NoError(t, err) assert.Equal(t, expected, actual) } type Group struct { Expression *Expression `"(" @@ ")"` } type LookaheadGroup struct { Expression *Expression `"(" "?" ("=" | "!") @@ ")"` } type EBNFOption struct { Expression *Expression `"[" @@ "]"` } type Repetition struct { Expression *Expression `"{" @@ "}"` } type Negation struct { Expression *Expression `"!" @@` } type Literal struct { Start string `@String` } type Range struct { Start string `@String` End string `"…" @String` } type Term struct { Name string `@Ident |` Literal *Literal `@@ |` Range *Range `@@ |` Group *Group `@@ |` LookaheadGroup *LookaheadGroup `@@ |` Option *EBNFOption `@@ |` Repetition *Repetition `@@ |` Negation *Negation `@@` } type Sequence struct { Terms []*Term `@@+` } type Expression struct { Alternatives []*Sequence `@@ ( "|" @@ )*` } type Production struct { Name string `@Ident "="` Expression []*Expression `@@+ "."` } type EBNF struct { Productions []*Production `@@*` } func TestEBNFParser(t *testing.T) { parser := mustTestParser[EBNF](t, participle.Unquote()) expected := &EBNF{ Productions: []*Production{ { Name: "Production", Expression: []*Expression{ { Alternatives: []*Sequence{ { Terms: []*Term{ {Name: "name"}, {Literal: &Literal{Start: "="}}, { Option: &EBNFOption{ Expression: &Expression{ Alternatives: []*Sequence{ { Terms: []*Term{ {Name: "Expression"}, }, }, }, }, }, }, {Literal: &Literal{Start: "."}}, }, }, }, }, }, }, { Name: "Expression", Expression: []*Expression{ { Alternatives: []*Sequence{ { Terms: []*Term{ {Name: "Alternative"}, { Repetition: &Repetition{ Expression: &Expression{ Alternatives: []*Sequence{ { Terms: []*Term{ {Literal: &Literal{Start: "|"}}, {Name: "Alternative"}, }, }, }, }, }, }, }, }, }, }, }, }, { Name: "Alternative", Expression: []*Expression{ { Alternatives: []*Sequence{ { Terms: []*Term{ {Name: "Term"}, { Repetition: &Repetition{ Expression: &Expression{ Alternatives: []*Sequence{ { Terms: []*Term{ {Name: "Term"}, }, }, }, }, }, }, }, }, }, }, }, }, { Name: "Term", Expression: []*Expression{ { Alternatives: []*Sequence{ {Terms: []*Term{{Name: "name"}}}, { Terms: []*Term{ {Name: "token"}, { Option: &EBNFOption{ Expression: &Expression{ Alternatives: []*Sequence{ { Terms: []*Term{ {Literal: &Literal{Start: "…"}}, {Name: "token"}, }, }, }, }, }, }, }, }, {Terms: []*Term{{Literal: &Literal{Start: "@@"}}}}, {Terms: []*Term{{Name: "Group"}}}, {Terms: []*Term{{Name: "EBNFOption"}}}, {Terms: []*Term{{Name: "Repetition"}}}, }, }, }, }, { Name: "Group", Expression: []*Expression{ { Alternatives: []*Sequence{ { Terms: []*Term{ {Literal: &Literal{Start: "("}}, {Name: "Expression"}, {Literal: &Literal{Start: ")"}}, }, }, }, }, }, }, { Name: "EBNFOption", 
Expression: []*Expression{ { Alternatives: []*Sequence{ { Terms: []*Term{ {Literal: &Literal{Start: "["}}, {Name: "Expression"}, {Literal: &Literal{Start: "]"}}, }, }, }, }, }, }, { Name: "Repetition", Expression: []*Expression{ { Alternatives: []*Sequence{ { Terms: []*Term{ {Literal: &Literal{Start: "{"}}, {Name: "Expression"}, {Literal: &Literal{Start: "}"}}, }, }, }, }, }, }, }, } actual, err := parser.ParseString("", strings.TrimSpace(` Production = name "=" [ Expression ] "." . Expression = Alternative { "|" Alternative } . Alternative = Term { Term } . Term = name | token [ "…" token ] | "@@" | Group | EBNFOption | Repetition . Group = "(" Expression ")" . EBNFOption = "[" Expression "]" . Repetition = "{" Expression "}" . `)) assert.NoError(t, err) assert.Equal(t, expected, actual) } func TestParseExpression(t *testing.T) { type testNestA struct { A string `":" @"a"*` } type testNestB struct { B string `";" @"b"*` } type testExpression struct { A *testNestA `@@ |` B *testNestB `@@` } parser := mustTestParser[testExpression](t) expected := &testExpression{ B: &testNestB{ B: "b", }, } actual, err := parser.ParseString("", ";b") assert.NoError(t, err) assert.Equal(t, expected, actual) } func TestParseOptional(t *testing.T) { type testOptional struct { A string `( @"a" @"b" )?` B string `@"c"` } parser := mustTestParser[testOptional](t) expected := &testOptional{B: "c"} actual, err := parser.ParseString("", `c`) assert.NoError(t, err) assert.Equal(t, expected, actual) } func TestHello(t *testing.T) { type testHello struct { Hello string `@"hello"` To string `@String` } parser := mustTestParser[testHello](t, participle.Unquote()) expected := &testHello{"hello", `Bobby Brown`} actual, err := parser.ParseString("", `hello "Bobby Brown"`) assert.NoError(t, err) assert.Equal(t, expected, actual) } func mustTestParser[G any](t *testing.T, options ...participle.Option) *participle.Parser[G] { t.Helper() parser, err := participle.Build[G](options...) assert.NoError(t, err) return parser } func BenchmarkEBNFParser(b *testing.B) { parser, err := participle.Build[EBNF]() assert.NoError(b, err) b.ResetTimer() source := strings.TrimSpace(` Production = name "=" [ Expression ] "." . Expression = Alternative { "|" Alternative } . Alternative = Term { Term } . Term = name | token [ "…" token ] | "@@" | Group | EBNFOption | Repetition . Group = "(" Expression ")" . EBNFOption = "[" Expression "]" . Repetition = "{" Expression "}" . `) for i := 0; i < b.N; i++ { _, _ = parser.ParseString("", source) } } func TestRepeatAcrossFields(t *testing.T) { type grammar struct { A string `( @("." 
">") |` B string ` @("," "<") )*` } parser := mustTestParser[grammar](t) expected := &grammar{A: ".>.>.>.>", B: ",<,<,<"} actual, err := parser.ParseString("", ".>,<.>.>,<.>,<") assert.NoError(t, err) assert.Equal(t, expected, actual) } func TestPosInjection(t *testing.T) { type subgrammar struct { Pos lexer.Position B string `@","*` EndPos lexer.Position } type grammar struct { Pos lexer.Position A string `@"."*` B *subgrammar `@@` C string `@"."` EndPos lexer.Position } parser := mustTestParser[grammar](t) expected := &grammar{ Pos: lexer.Position{ Offset: 3, Line: 1, Column: 4, }, A: "...", B: &subgrammar{ B: ",,,", Pos: lexer.Position{ Offset: 6, Line: 1, Column: 7, }, EndPos: lexer.Position{ Offset: 9, Line: 1, Column: 10, }, }, C: ".", EndPos: lexer.Position{ Offset: 10, Line: 1, Column: 11, }, } actual, err := parser.ParseString("", " ...,,,.") assert.NoError(t, err) assert.Equal(t, expected, actual) } func TestPosInjectionCustomPosition(t *testing.T) { type Position struct { Filename string Offset int Line int Column int } type grammar struct { Pos Position EndPos Position Name string `@Ident` } parser := mustTestParser[grammar](t) g, err := parser.ParseString("", " foo") assert.NoError(t, err) assert.Equal(t, Position{Offset: 2, Line: 1, Column: 3}, g.Pos) assert.Equal(t, Position{Offset: 5, Line: 1, Column: 6}, g.EndPos) } type parseableCount int func (c *parseableCount) Capture(values []string) error { *c += parseableCount(len(values)) return nil } func TestCaptureInterface(t *testing.T) { type grammar struct { Count parseableCount `@"a"*` } parser := mustTestParser[grammar](t) expected := &grammar{Count: 3} actual, err := parser.ParseString("", "a a a") assert.NoError(t, err) assert.Equal(t, expected, actual) } type unmarshallableCount int func (u *unmarshallableCount) UnmarshalText(text []byte) error { *u += unmarshallableCount(len(text)) return nil } func TestTextUnmarshalerInterface(t *testing.T) { type grammar struct { Count unmarshallableCount `{ @"a" }` } parser := mustTestParser[grammar](t) expected := &grammar{Count: 3} actual, err := parser.ParseString("", "a a a") assert.NoError(t, err) assert.Equal(t, expected, actual) } func TestLiteralTypeConstraint(t *testing.T) { type grammar struct { Literal string `@"123456":String` } parser := mustTestParser[grammar](t, participle.Unquote()) expected := &grammar{Literal: "123456"} actual, err := parser.ParseString("", `"123456"`) assert.NoError(t, err) assert.Equal(t, expected, actual) _, err = parser.ParseString("", `123456`) assert.Error(t, err) } type nestedCapture struct { Tokens []string } func (n *nestedCapture) Capture(tokens []string) error { n.Tokens = tokens return nil } func TestStructCaptureInterface(t *testing.T) { type grammar struct { Capture *nestedCapture `@String` } parser, err := participle.Build[grammar](participle.Unquote()) assert.NoError(t, err) expected := &grammar{Capture: &nestedCapture{Tokens: []string{"hello"}}} actual, err := parser.ParseString("", `"hello"`) assert.NoError(t, err) assert.Equal(t, expected, actual) } type parseableStruct struct { Tokens []string } func (p *parseableStruct) Parse(lex *lexer.PeekingLexer) error { for { tok := lex.Next() if tok.EOF() { break } p.Tokens = append(p.Tokens, tok.Value) } return nil } func TestParseable(t *testing.T) { type grammar struct { Inner *parseableStruct `@@` } parser, err := participle.Build[grammar](participle.Unquote()) assert.NoError(t, err) expected := &grammar{Inner: &parseableStruct{Tokens: []string{"hello", "123", "world"}}} actual, err := 
parser.ParseString("", `hello 123 "world"`) assert.NoError(t, err) assert.Equal(t, expected, actual) } func TestStringConcat(t *testing.T) { type grammar struct { Field string `@"."+` } parser, err := participle.Build[grammar]() assert.NoError(t, err) expected := &grammar{"...."} actual, err := parser.ParseString("", `. . . .`) assert.NoError(t, err) assert.Equal(t, expected, actual) } func TestParseIntSlice(t *testing.T) { type grammar struct { Field []int `@Int+` } parser := mustTestParser[grammar](t) expected := &grammar{[]int{1, 2, 3, 4}} actual, err := parser.ParseString("", `1 2 3 4`) assert.NoError(t, err) assert.Equal(t, expected, actual) } func TestEmptyStructErrorsNotPanicsIssue21(t *testing.T) { type grammar struct { Foo struct{} `@@` } _, err := participle.Build[grammar]() assert.Error(t, err) } func TestMultipleTokensIntoScalar(t *testing.T) { type grammar struct { Field int `@("-"? Int)` } p, err := participle.Build[grammar]() assert.NoError(t, err) actual, err := p.ParseString("", `- 10`) assert.NoError(t, err) assert.Equal(t, -10, actual.Field) _, err = p.ParseString("", `x`) assert.EqualError(t, err, `1:1: unexpected token "x" (expected )`) _, err = p.ParseString("", ` `) assert.EqualError(t, err, `1:2: unexpected token "" (expected )`) } type posMixin struct { Pos lexer.Position } func TestMixinPosIsPopulated(t *testing.T) { type grammar struct { posMixin Int int `@Int` } p := mustTestParser[grammar](t) actual, err := p.ParseString("", "10") assert.NoError(t, err) assert.Equal(t, 10, actual.Int) assert.Equal(t, 1, actual.Pos.Column) assert.Equal(t, 1, actual.Pos.Line) } type testParserMixin struct { A string `@Ident` B string `@Ident` } func TestMixinFieldsAreParsed(t *testing.T) { type grammar struct { testParserMixin C string `@Ident` } p := mustTestParser[grammar](t) actual, err := p.ParseString("", "one two three") assert.NoError(t, err) assert.Equal(t, "one", actual.A) assert.Equal(t, "two", actual.B) assert.Equal(t, "three", actual.C) } func TestNestedOptional(t *testing.T) { type grammar struct { Args []string `"(" [ @Ident ( "," @Ident )* ] ")"` } p := mustTestParser[grammar](t) _, err := p.ParseString("", `()`) assert.NoError(t, err) _, err = p.ParseString("", `(a)`) assert.NoError(t, err) _, err = p.ParseString("", `(a, b, c)`) assert.NoError(t, err) _, err = p.ParseString("", `(1)`) assert.Error(t, err) } func TestInvalidNumbers(t *testing.T) { type grammar struct { Int8 int8 ` "int8" @Int` Int16 int16 `| "int16" @Int` Int32 int32 `| "int32" @Int` Int64 int64 `| "int64" @Int` Uint8 uint8 `| "uint8" @Int` Uint16 uint16 `| "uint16" @Int` Uint32 uint32 `| "uint32" @Int` Uint64 uint64 `| "uint64" @Int` Float32 float32 `| "float32" @Float` Float64 float64 `| "float64" @Float` } p := mustTestParser[grammar](t) tests := []struct { name string input string expected *grammar err bool }{ {name: "ValidInt8", input: "int8 127", expected: &grammar{Int8: 127}}, {name: "InvalidInt8", input: "int8 129", err: true}, {name: "ValidInt16", input: "int16 32767", expected: &grammar{Int16: 32767}}, {name: "InvalidInt16", input: "int16 32768", err: true}, {name: "ValidInt32", input: fmt.Sprintf("int32 %d", math.MaxInt32), expected: &grammar{Int32: math.MaxInt32}}, {name: "InvalidInt32", input: fmt.Sprintf("int32 %d", int64(math.MaxInt32+1)), err: true}, {name: "ValidInt64", input: fmt.Sprintf("int64 %d", int64(math.MaxInt64)), expected: &grammar{Int64: math.MaxInt64}}, {name: "InvalidInt64", input: "int64 9223372036854775808", err: true}, {name: "ValidFloat64", input: "float64 
1234.5", expected: &grammar{Float64: 1234.5}}, {name: "InvalidFloat64", input: "float64 asdf", err: true}, } for _, test := range tests { // nolint: scopelint t.Run(test.name, func(t *testing.T) { actual, err := p.ParseString("", test.input) if test.err { assert.Error(t, err, fmt.Sprintf("%#v", actual)) } else { assert.NoError(t, err) assert.Equal(t, test.expected, actual) } }) } } // We'd like this to work, but it can wait. func TestPartialAST(t *testing.T) { type grammar struct { Succeed string `@Ident` Fail string `@"foo"` } p := mustTestParser[grammar](t) actual, err := p.ParseString("", `foo bar`) assert.Error(t, err) expected := &grammar{Succeed: "foo"} assert.Equal(t, expected, actual) } func TestCaseInsensitive(t *testing.T) { type grammar struct { Select string `"select":Keyword @Ident` } // lex := lexer.MustStateful(lexer.Regexp( // `(?i)(?PSELECT)` + // `|(?P\w+)` + // `|(\s+)`, // )) lex := lexer.MustSimple([]lexer.SimpleRule{ {"Keyword", `(?i)SELECT`}, {"Ident", `\w+`}, {"whitespace", `\s+`}, }) p := mustTestParser[grammar](t, participle.Lexer(lex), participle.CaseInsensitive("Keyword")) actual, err := p.ParseString("", `SELECT foo`) expected := &grammar{"foo"} assert.NoError(t, err) assert.Equal(t, expected, actual) actual, err = p.ParseString("", `select foo`) assert.NoError(t, err) assert.Equal(t, expected, actual) } func TestTokenAfterRepeatErrors(t *testing.T) { type grammar struct { Text string `@Ident* "foo"` } p := mustTestParser[grammar](t) _, err := p.ParseString("", ``) assert.Error(t, err) } func TestEOFAfterRepeat(t *testing.T) { type grammar struct { Text string `@Ident*` } p := mustTestParser[grammar](t) _, err := p.ParseString("", ``) assert.NoError(t, err) } func TestTrailing(t *testing.T) { type grammar struct { Text string `@Ident` } p := mustTestParser[grammar](t) _, err := p.ParseString("", `foo bar`) assert.Error(t, err) } type modifierTest[G any] struct { name string input string expected string fail bool } func (test modifierTest[G]) test(t *testing.T) { t.Helper() t.Run(test.name, func(t *testing.T) { p := mustTestParser[G](t) grammar, err := p.ParseString("", test.input) if test.fail { assert.Error(t, err) } else { assert.NoError(t, err) actual := reflect.ValueOf(grammar).Elem().FieldByName("A").String() assert.Equal(t, test.expected, actual) } }) } func TestModifiers(t *testing.T) { type nonEmptyGrammar struct { A string `@( ("x"? "y"? "z"?)! "b" )` } tests := []interface{ test(t *testing.T) }{ modifierTest[nonEmptyGrammar]{name: "NonMatchingOptionalNonEmpty", input: "b", fail: true, }, modifierTest[nonEmptyGrammar]{name: "NonEmptyMatch", input: "x b", expected: "xb", }, modifierTest[nonEmptyGrammar]{name: "NonEmptyMatchAll", input: "x y z b", expected: "xyzb", }, modifierTest[nonEmptyGrammar]{name: "NonEmptyMatchSome", input: "x z b", expected: "xzb", }, modifierTest[struct { A string `@( "a"? "b" )` }]{name: "MatchingOptional", input: "a b", expected: "ab", }, modifierTest[struct { A string `@( "a"? 
"b" )` }]{name: "NonMatchingOptionalIsSkipped", input: "b", expected: "b", }, modifierTest[struct { A string `@( "a"+ )` }]{name: "MatchingOneOrMore", input: "a a a a a", expected: "aaaaa", }, modifierTest[struct { A string `@( "a"+ )` }]{name: "NonMatchingOneOrMore", input: "", fail: true, }, modifierTest[struct { A string `@( "a"* )` }]{name: "MatchingZeroOrMore", input: "aaaaaaa", fail: true, }, modifierTest[struct { A string `@( "a"* )` }]{name: "NonMatchingZeroOrMore", input: "", }, } for _, test := range tests { test.test(t) } } func TestNonEmptyMatchWithOptionalGroup(t *testing.T) { type term struct { Minus bool `@'-'?` Name string `@Ident` } type grammar struct { Start term `parser:"'[' (@@?"` End term `parser:" (':' @@)?)! ']'"` } p := mustTestParser[grammar](t) result, err := p.ParseString("", "[-x]") assert.NoError(t, err) assert.Equal(t, &grammar{Start: term{Minus: true, Name: "x"}}, result) result, err = p.ParseString("", "[a:-b]") assert.NoError(t, err) assert.Equal(t, &grammar{Start: term{Name: "a"}, End: term{Minus: true, Name: "b"}}, result) result, err = p.ParseString("", "[:end]") assert.NoError(t, err) assert.Equal(t, &grammar{End: term{Name: "end"}}, result) _, err = p.ParseString("", "[]") assert.EqualError(t, err, `1:2: sub-expression (Term? (":" Term)?)! cannot be empty`) } func TestIssue60(t *testing.T) { type grammar struct { A string `@("one" | | "two")` } _, err := participle.Build[grammar]() assert.Error(t, err) } type Issue62Bar struct { A int } func (x *Issue62Bar) Parse(lex *lexer.PeekingLexer) error { token := lex.Next() var err error x.A, err = strconv.Atoi(token.Value) return err } type Issue62Foo struct { Bars []Issue62Bar `parser:"@@+"` } func TestIssue62(t *testing.T) { _, err := participle.Build[Issue62Foo]() assert.NoError(t, err) } // nolint: structcheck, unused func TestIssue71(t *testing.T) { type Sub struct { name string `@Ident` } type grammar struct { pattern *Sub `@@` } _, err := participle.Build[grammar]() assert.Error(t, err) } func TestAllowTrailing(t *testing.T) { type G struct { Name string `@Ident` } p, err := participle.Build[G]() assert.NoError(t, err) _, err = p.ParseString("", `hello world`) assert.Error(t, err) g, err := p.ParseString("", `hello world`, participle.AllowTrailing(true)) assert.NoError(t, err) assert.Equal(t, &G{"hello"}, g) } func TestDisjunctionErrorReporting(t *testing.T) { type statement struct { Add bool ` @"add"` Remove bool `| @"remove"` } type grammar struct { Statements []*statement `"{" ( @@ )* "}"` } p := mustTestParser[grammar](t) _, err := p.ParseString("", `{ add foo }`) // TODO: This should produce a more useful error. This is returned by sequence.Parse(). 
assert.EqualError(t, err, `1:7: unexpected token "foo" (expected "}")`) } func TestCustomInt(t *testing.T) { type MyInt int type G struct { Value MyInt `@Int` } p, err := participle.Build[G]() assert.NoError(t, err) g, err := p.ParseString("", `42`) assert.NoError(t, err) assert.Equal(t, &G{42}, g) } func TestBoolIfSet(t *testing.T) { type G struct { Value bool `@"true"?` } p, err := participle.Build[G]() assert.NoError(t, err) g, err := p.ParseString("", `true`) assert.NoError(t, err) assert.Equal(t, &G{true}, g) g, err = p.ParseString("", ``) assert.NoError(t, err) assert.Equal(t, &G{false}, g) } func TestCustomBoolIfSet(t *testing.T) { type MyBool bool type G struct { Value MyBool `@"true"?` } p, err := participle.Build[G]() assert.NoError(t, err) g, err := p.ParseString("", `true`) assert.NoError(t, err) assert.Equal(t, &G{true}, g) g, err = p.ParseString("", ``) assert.NoError(t, err) assert.Equal(t, &G{false}, g) } func TestPointerToList(t *testing.T) { type grammar struct { List *[]string `@Ident*` } p := mustTestParser[grammar](t) ast, err := p.ParseString("", `foo bar`) assert.NoError(t, err) l := []string{"foo", "bar"} assert.Equal(t, &grammar{List: &l}, ast) } // I'm not sure if this is a problem that should be solved like this. // func TestMatchHydratesNullFields(t *testing.T) { // type grammar struct { // List []string `"{" @Ident* "}"` // } // p := mustTestParser[grammar](t) // ast := &grammar{} // err := p.ParseString(`{}`, ast) // assert.NoError(t, err) // assert.NotNil(t, ast.List) // } func TestNegation(t *testing.T) { type grammar struct { EverythingUntilSemicolon *[]string `@!';'* @';'` } p := mustTestParser[grammar](t) ast, err := p.ParseString("", `hello world ;`) assert.NoError(t, err) assert.Equal(t, &[]string{"hello", "world", ";"}, ast.EverythingUntilSemicolon) _, err = p.ParseString("", `hello world`) assert.Error(t, err) } func TestNegationWithPattern(t *testing.T) { type grammar struct { EverythingMoreComplex *[]string `@!(';' String)* @';' @String` } p := mustTestParser[grammar](t, participle.Unquote()) // j, err := json.MarshalIndent(p.root, "", " ") // log.Print(j) // log.Print(ebnf(p.root)) ast, err := p.ParseString("", `hello world ; "some-str"`) assert.NoError(t, err) assert.Equal(t, &[]string{"hello", "world", ";", `some-str`}, ast.EverythingMoreComplex) ast, err = p.ParseString("", `hello ; world ; "hey"`) assert.NoError(t, err) assert.Equal(t, &[]string{"hello", ";", "world", ";", `hey`}, ast.EverythingMoreComplex) _, err = p.ParseString("", `hello ; world ;`) assert.Error(t, err) } func TestNegationWithDisjunction(t *testing.T) { type grammar struct { EverythingMoreComplex *[]string `@!(';' | ',')* @(';' | ',')` } // Note: we need more lookahead since (';' String) needs some before failing to match p := mustTestParser[grammar](t) ast, err := p.ParseString("", `hello world ;`) assert.NoError(t, err) assert.Equal(t, &[]string{"hello", "world", ";"}, ast.EverythingMoreComplex) ast, err = p.ParseString("", `hello world , `) assert.NoError(t, err) assert.Equal(t, &[]string{"hello", "world", ","}, ast.EverythingMoreComplex) } func TestNegationLookaheadError(t *testing.T) { type grammar struct { Stuff []string `@Ident @!('.' | '#') @Ident` } p := mustTestParser[grammar](t) ast, err := p.ParseString("", `hello, world`) assert.NoError(t, err) assert.Equal(t, []string{"hello", ",", "world"}, ast.Stuff) _, err = p.ParseString("", `hello . 
world`) assert.EqualError(t, err, `1:7: unexpected token "."`) } func TestLookaheadGroup_Positive_SingleToken(t *testing.T) { type val struct { Str string ` @String` Int int `| @Int` } type op struct { Op string `@('+' | '*' (?= @Int))` Operand val `@@` } type sum struct { Left val `@@` Ops []op `@@*` } p := mustTestParser[sum](t) ast, err := p.ParseString("", `"x" + "y" + 4`) assert.NoError(t, err) assert.Equal(t, &sum{Left: val{Str: `"x"`}, Ops: []op{{"+", val{Str: `"y"`}}, {"+", val{Int: 4}}}}, ast) ast, err = p.ParseString("", `"a" * 4 + "b"`) assert.NoError(t, err) assert.Equal(t, &sum{Left: val{Str: `"a"`}, Ops: []op{{"*", val{Int: 4}}, {"+", val{Str: `"b"`}}}}, ast) ast, err = p.ParseString("", `1 * 2 * 3`) assert.NoError(t, err) assert.Equal(t, &sum{Left: val{Int: 1}, Ops: []op{{"*", val{Int: 2}}, {"*", val{Int: 3}}}}, ast) _, err = p.ParseString("", `"a" * 'x' + "b"`) assert.EqualError(t, err, `1:7: unexpected token "'x'"`) _, err = p.ParseString("", `4 * 2 + 0 * "b"`) assert.EqualError(t, err, `1:13: unexpected token "\"b\""`) } func TestLookaheadGroup_Negative_SingleToken(t *testing.T) { type variable struct { Name string `@Ident` } type grammar struct { Identifiers []variable `((?! 'except'|'end') @@)*` Except *variable `('except' @@)? 'end'` } p := mustTestParser[grammar](t) ast, err := p.ParseString("", `one two three exception end`) assert.NoError(t, err) assert.Equal(t, []variable{{"one"}, {"two"}, {"three"}, {"exception"}}, ast.Identifiers) assert.Zero(t, ast.Except) ast, err = p.ParseString("", `anything except this end`) assert.NoError(t, err) assert.Equal(t, []variable{{"anything"}}, ast.Identifiers) assert.Equal(t, &variable{"this"}, ast.Except) ast, err = p.ParseString("", `except the end`) assert.NoError(t, err) assert.Zero(t, ast.Identifiers) assert.Equal(t, &variable{"the"}, ast.Except) _, err = p.ParseString("", `no ending`) assert.EqualError(t, err, `1:10: unexpected token "" (expected "end")`) _, err = p.ParseString("", `no end in sight`) assert.EqualError(t, err, `1:8: unexpected token "in"`) } func TestLookaheadGroup_Negative_MultipleTokens(t *testing.T) { type grammar struct { Parts []string `((?! '.' '.' '.') @(Ident | '.'))*` } p := mustTestParser[grammar](t) ast, err := p.ParseString("", `x.y.z.`) assert.NoError(t, err) assert.Equal(t, []string{"x", ".", "y", ".", "z", "."}, ast.Parts) ast, err = p.ParseString("", `..x..`) assert.NoError(t, err) assert.Equal(t, []string{".", ".", "x", ".", "."}, ast.Parts) ast, err = p.ParseString("", `two.. are fine`) assert.NoError(t, err) assert.Equal(t, []string{"two", ".", ".", "are", "fine"}, ast.Parts) _, err = p.ParseString("", `but this... 
is just wrong`) assert.EqualError(t, err, `1:9: unexpected token "."`) } func TestASTTokens(t *testing.T) { type subject struct { Tokens []lexer.Token Word string `@Ident` } type hello struct { Tokens []lexer.Token Subject subject `"hello" @@` } p := mustTestParser[hello](t, participle.Elide("Whitespace"), participle.Lexer(lexer.MustSimple([]lexer.SimpleRule{ {"Ident", `\w+`}, {"Whitespace", `\s+`}, }))) actual, err := p.ParseString("", "hello world") assert.NoError(t, err) tokens := []lexer.Token{ {-2, "hello", lexer.Position{Line: 1, Column: 1}}, {-3, " ", lexer.Position{Offset: 5, Line: 1, Column: 6}}, {-2, "world", lexer.Position{Offset: 6, Line: 1, Column: 7}}, } expected := &hello{ Tokens: tokens, Subject: subject{ Tokens: tokens[1:], Word: "world", }, } assert.Equal(t, expected, actual) } func TestCaptureIntoToken(t *testing.T) { type ast struct { Head lexer.Token `@Ident` Tail []lexer.Token `@(Ident*)` } p := mustTestParser[ast](t) actual, err := p.ParseString("", "hello waz baz") assert.NoError(t, err) expected := &ast{ Head: lexer.Token{-2, "hello", lexer.Position{Line: 1, Column: 1}}, Tail: []lexer.Token{ {-2, "waz", lexer.Position{Offset: 6, Line: 1, Column: 7}}, {-2, "baz", lexer.Position{Offset: 10, Line: 1, Column: 11}}, }, } assert.Equal(t, expected, actual) } func TestEndPos(t *testing.T) { type Ident struct { Pos lexer.Position EndPos lexer.Position Text string `parser:"@Ident"` } type AST struct { First *Ident `parser:"@@"` Second *Ident `parser:"@@"` } var ( Lexer = lexer.Must(lexer.New(lexer.Rules{ "Root": { {"Ident", `[\w:]+`, nil}, {"Whitespace", `[\r\t ]+`, nil}, }, })) Parser = participle.MustBuild[AST]( participle.Lexer(Lexer), participle.Elide("Whitespace"), ) ) mod, err := Parser.Parse("", strings.NewReader("foo bar")) assert.NoError(t, err) assert.Equal(t, 0, mod.First.Pos.Offset) assert.Equal(t, 3, mod.First.EndPos.Offset) } func TestBug(t *testing.T) { type A struct { Shared string `parser:"@'1'"` Diff string `parser:"@A"` } type B struct { Shared string `parser:"@'1'"` Diff string `parser:"@B"` } type AST struct { Branch string `parser:"@'branch'"` A *A `parser:"( @@"` B *B `parser:"| @@ )"` } var ( lexer = lexer.Must(lexer.New(lexer.Rules{ "Root": { {"A", `@`, nil}, {"B", `!`, nil}, {"Ident", `[\w:]+`, nil}, {"Whitespace", `[\r\t ]+`, nil}, }, })) parser = participle.MustBuild[AST]( participle.Lexer(lexer), participle.Elide("Whitespace"), ) ) expected := &AST{ Branch: "branch", B: &B{ Shared: "1", Diff: "!", }, } actual, err := parser.Parse("name", strings.NewReader(`branch 1!`)) assert.NoError(t, err) assert.Equal(t, expected, actual) } type sliceCapture string func (c *sliceCapture) Capture(values []string) error { *c = sliceCapture(strings.ToUpper(values[0])) return nil } func TestCaptureOnSliceElements(t *testing.T) { // nolint:dupl type capture struct { Single *sliceCapture `@Capture` Slice []sliceCapture `@Capture @Capture` SlicePtr []*sliceCapture `@Capture @Capture` } parser := participle.MustBuild[capture]([]participle.Option{ participle.Lexer(lexer.MustSimple([]lexer.SimpleRule{ {Name: "Capture", Pattern: `[a-z]{3}`}, {Name: "Whitespace", Pattern: `[\s|\n]+`}, })), participle.Elide("Whitespace"), }...) 
captured, err := parser.ParseString("capture_slice", `abc def ijk lmn opq`) assert.NoError(t, err) expectedSingle := sliceCapture("ABC") expectedSlicePtr1 := sliceCapture("LMN") expectedSlicePtr2 := sliceCapture("OPQ") expected := &capture{ Single: &expectedSingle, Slice: []sliceCapture{"DEF", "IJK"}, SlicePtr: []*sliceCapture{&expectedSlicePtr1, &expectedSlicePtr2}, } assert.Equal(t, expected, captured) } type sliceParse string func (s *sliceParse) Parse(lex *lexer.PeekingLexer) error { token := lex.Peek() if len(token.Value) != 3 { return participle.NextMatch } lex.Next() *s = sliceParse(strings.Repeat(token.Value, 2)) return nil } func TestParseOnSliceElements(t *testing.T) { // nolint:dupl type parse struct { Single *sliceParse `@@` Slice []sliceParse `@@+` } parser := participle.MustBuild[parse]([]participle.Option{ participle.Lexer(lexer.MustSimple([]lexer.SimpleRule{ {Name: "Element", Pattern: `[a-z]{3}`}, {Name: "Whitespace", Pattern: `[\s|\n]+`}, })), participle.Elide("Whitespace"), }...) parsed, err := parser.ParseString("parse_slice", `abc def ijk`) assert.NoError(t, err) expectedSingle := sliceParse("abcabc") expected := &parse{ Single: &expectedSingle, Slice: []sliceParse{"defdef", "ijkijk"}, } assert.Equal(t, expected, parsed) } func TestUnmarshalNetIP(t *testing.T) { type grammar struct { IP net.IP `@IP` } parser := mustTestParser[grammar](t, participle.Lexer(lexer.MustSimple([]lexer.SimpleRule{ {"IP", `[\d.]+`}, }))) ast, err := parser.ParseString("", "10.2.3.4") assert.NoError(t, err) assert.Equal(t, "10.2.3.4", ast.IP.String()) } type Address net.IP func (a *Address) Capture(values []string) error { fmt.Println("does not run at all") *a = Address(net.ParseIP(values[0])) return nil } func TestCaptureIP(t *testing.T) { type grammar struct { IP Address `@IP` } parser := mustTestParser[grammar](t, participle.Lexer(lexer.MustSimple([]lexer.SimpleRule{ {"IP", `[\d.]+`}, }))) ast, err := parser.ParseString("", "10.2.3.4") assert.NoError(t, err) assert.Equal(t, "10.2.3.4", (net.IP)(ast.IP).String()) } func BenchmarkIssue143(b *testing.B) { type Disjunction struct { Long1 bool `parser:" '<' '1' ' ' 'l' 'o' 'n' 'g' ' ' 'r' 'u' 'l' 'e' ' ' 't' 'o' ' ' 'f' 'o' 'r' 'm' 'a' 't' '>'"` Long2 bool `parser:"| '<' '2' ' ' 'l' 'o' 'n' 'g' ' ' 'r' 'u' 'l' 'e' ' ' 't' 'o' ' ' 'f' 'o' 'r' 'm' 'a' 't' '>'"` Long3 bool `parser:"| '<' '3' ' ' 'l' 'o' 'n' 'g' ' ' 'r' 'u' 'l' 'e' ' ' 't' 'o' ' ' 'f' 'o' 'r' 'm' 'a' 't' '>'"` Long4 bool `parser:"| '<' '4' ' ' 'l' 'o' 'n' 'g' ' ' 'r' 'u' 'l' 'e' ' ' 't' 'o' ' ' 'f' 'o' 'r' 'm' 'a' 't' '>'"` Real bool `parser:"| '<' 'x' '>'"` } type Disjunctions struct { List []Disjunction `parser:"@@*"` } var disjunctionParser = participle.MustBuild[Disjunctions]() input := " " b.ResetTimer() b.ReportAllocs() for i := 0; i < b.N; i++ { if _, err := disjunctionParser.ParseString("", input); err != nil { panic(err) } } } type Boxes struct { Pos lexer.Position Boxes Box `@Ident` } type Box struct { Pos lexer.Position Val string `@Ident` } func (b *Box) Capture(values []string) error { b.Val = values[0] return nil } func TestBoxedCapture(t *testing.T) { lex := lexer.MustSimple([]lexer.SimpleRule{ {"Ident", `[a-zA-Z](\w|\.|/|:|-)*`}, {"whitespace", `\s+`}, }) parser := participle.MustBuild[Boxes]( participle.Lexer(lex), participle.UseLookahead(2), ) if _, err := parser.ParseString("test", "abc::cdef.abc"); err != nil { t.Fatal(err) } } func TestMatchEOF(t *testing.T) { type testMatchNewlineOrEOF struct { Text []string `@Ident+ ("\n" | EOF)` } p := 
func TestMatchEOF(t *testing.T) {
	type testMatchNewlineOrEOF struct {
		Text []string `@Ident+ ("\n" | EOF)`
	}

	p := mustTestParser[testMatchNewlineOrEOF](t)
	_, err := p.ParseString("", "hell world")
	assert.NoError(t, err)
	_, err = p.ParseString("", "hell world\n")
	assert.NoError(t, err)
}

func TestParseExplicitElidedIdent(t *testing.T) { // nolint
	lex := lexer.MustSimple([]lexer.SimpleRule{
		{"Ident", `[a-zA-Z](\w|\.|/|:|-)*`},
		{"Comment", `/\*[^*]*\*/`},
		{"whitespace", `\s+`},
	})
	type grammar struct {
		Comment string `@Comment?`
		Ident   string `@Ident`
	}
	p := mustTestParser[grammar](t, participle.Lexer(lex), participle.Elide("Comment"))

	actual, err := p.ParseString("", `hello`)
	assert.NoError(t, err)
	assert.Equal(t, &grammar{Ident: "hello"}, actual)

	actual, err = p.ParseString("", `/* Comment */ hello`)
	assert.NoError(t, err)
	assert.Equal(t, &grammar{Comment: `/* Comment */`, Ident: "hello"}, actual)
}

func TestParseExplicitElidedTypedLiteral(t *testing.T) { // nolint
	lex := lexer.MustSimple([]lexer.SimpleRule{
		{"Ident", `[a-zA-Z](\w|\.|/|:|-)*`},
		{"Comment", `/\*[^*]*\*/`},
		{"whitespace", `\s+`},
	})
	type grammar struct {
		Comment string `@"/* Comment */":Comment?`
		Ident   string `@Ident`
	}
	p := mustTestParser[grammar](t, participle.Lexer(lex), participle.Elide("Comment"))

	actual, err := p.ParseString("", `hello`)
	assert.NoError(t, err)
	assert.Equal(t, &grammar{Ident: "hello"}, actual)

	actual, err = p.ParseString("", `/* Comment */ hello`)
	assert.NoError(t, err)
	assert.Equal(t, &grammar{Comment: `/* Comment */`, Ident: "hello"}, actual)
}

func TestEmptySequenceMatches(t *testing.T) {
	lex := lexer.MustSimple([]lexer.SimpleRule{
		{"Ident", `[a-zA-Z](\w|\.|/|:|-)*`},
		{"Comment", `/\*[^*]*\*/`},
		{"Whitespace", `\s+`},
	})
	type grammar struct {
		Ident    []string `@Ident*`
		Comments []string `@Comment*`
	}
	p := mustTestParser[grammar](t, participle.Lexer(lex), participle.Elide("Whitespace"))

	expected := &grammar{}
	actual, err := p.ParseString("", "")
	assert.NoError(t, err)
	assert.Equal(t, expected, actual)
}

type RootParseableFail struct{}

func (*RootParseableFail) String() string   { return "" }
func (*RootParseableFail) GoString() string { return "" }
func (*RootParseableFail) Parse(lex *lexer.PeekingLexer) error {
	return errors.New("always fail immediately")
}

func TestRootParseableFail(t *testing.T) {
	p := mustTestParser[RootParseableFail](t)
	_, err := p.ParseString("", "blah")
	assert.EqualError(t, err, ":1:1: always fail immediately")
}

type (
	TestCustom interface{ isTestCustom() }

	CustomIdent   string
	CustomNumber  float64
	CustomBoolean bool
)

func (CustomIdent) isTestCustom()   {}
func (CustomNumber) isTestCustom()  {}
func (CustomBoolean) isTestCustom() {}

func TestParserWithCustomProduction(t *testing.T) {
	type grammar struct {
		Custom TestCustom `@@`
	}

	p := mustTestParser[grammar](t, participle.ParseTypeWith(func(lex *lexer.PeekingLexer) (TestCustom, error) {
		switch peek := lex.Peek(); {
		case peek.Type == scanner.Int || peek.Type == scanner.Float:
			v, err := strconv.ParseFloat(lex.Next().Value, 64)
			if err != nil {
				return nil, err
			}
			return CustomNumber(v), nil
		case peek.Type == scanner.Ident:
			name := lex.Next().Value
			if name == "true" || name == "false" {
				return CustomBoolean(name == "true"), nil
			}
			return CustomIdent(name), nil
		default:
			return nil, participle.NextMatch
		}
	}))

	type testCase struct {
		src      string
		expected TestCustom
	}

	for _, c := range []testCase{
		{"a", CustomIdent("a")},
		{"12.5", CustomNumber(12.5)},
		{"true", CustomBoolean(true)},
		{"false", CustomBoolean(false)},
	} {
		actual, err := p.ParseString("", c.src)
		assert.NoError(t, err)
		assert.Equal(t, c.expected, actual.Custom)
	}

	assert.Equal(t, `Grammar = TestCustom .`, p.String())
}
type (
	TestUnionA interface{ isTestUnionA() }
	TestUnionB interface{ isTestUnionB() }

	AMember1 struct {
		V string `@Ident`
	}

	AMember2 struct {
		V TestUnionB `"[" @@ "]"`
	}

	BMember1 struct {
		V float64 `@Int | @Float`
	}

	BMember2 struct {
		V TestUnionA `"{" @@ "}"`
	}
)

func (AMember1) isTestUnionA() {}
func (AMember2) isTestUnionA() {}
func (BMember1) isTestUnionB() {}
func (BMember2) isTestUnionB() {}

func TestParserWithUnion(t *testing.T) {
	type grammar struct {
		A TestUnionA `@@`
		B TestUnionB `| @@`
	}

	parser := mustTestParser[grammar](t, participle.UseLookahead(10),
		participle.Union[TestUnionA](AMember1{}, AMember2{}),
		participle.Union[TestUnionB](BMember1{}, BMember2{}))

	type testCase struct {
		src      string
		expected grammar
	}

	for _, c := range []testCase{
		{`a`, grammar{A: AMember1{"a"}}},
		{`1.5`, grammar{B: BMember1{1.5}}},
		{`[2.5]`, grammar{A: AMember2{BMember1{2.5}}}},
		{`{x}`, grammar{B: BMember2{AMember1{"x"}}}},
		{`{ [ { [12] } ] }`, grammar{B: BMember2{AMember2{BMember2{AMember2{BMember1{12}}}}}}},
	} {
		var trace strings.Builder
		actual, err := parser.ParseString("", c.src, participle.Trace(&trace))
		assert.NoError(t, err)
		assert.Equal(t, &c.expected, actual) //nolint:gosec
		assert.NotEqual(t, "", trace.String())
	}

	assert.Equal(t, strings.TrimSpace(`
Grammar = TestUnionA | TestUnionB .
TestUnionA = AMember1 | AMember2 .
AMember1 = <ident> .
AMember2 = "[" TestUnionB "]" .
TestUnionB = BMember1 | BMember2 .
BMember1 = <int> | <float> .
BMember2 = "{" TestUnionA "}" .
`), parser.String())
}

func TestParseSubProduction(t *testing.T) {
	type (
		ListItem struct {
			Number *float64 `(@Int | @Float)`
			String *string  `| @String`
		}

		Grammar struct {
			List []ListItem `"[" @@ ("," @@)* "]"`
		}
	)

	numberItem := func(n float64) ListItem { return ListItem{Number: &n} }
	stringItem := func(s string) ListItem { return ListItem{String: &s} }

	p := mustTestParser[Grammar](t, participle.Unquote())

	expected := &Grammar{List: []ListItem{numberItem(1), stringItem("test")}}
	actual, err := p.ParseString("", `[ 1, "test" ]`)
	assert.NoError(t, err)
	assert.Equal(t, expected, actual)

	expectedItem := numberItem(1.234e5)
	ip, err := participle.ParserForProduction[ListItem](p)
	assert.NoError(t, err)
	actualItem, err := ip.ParseString("", `1.234e5`)
	assert.NoError(t, err)
	assert.Equal(t, &expectedItem, actualItem)

	expectedItem2 := stringItem("\t\ttest\t\t")
	actualItem2, err := ip.ParseString("", `"\t\ttest\t\t"`)
	assert.NoError(t, err)
	assert.Equal(t, &expectedItem2, actualItem2)
}

type I255Grammar struct {
	Union I255Union `@@`
}

type I255Union interface{ union() }

type I255String struct {
	Value string `@String`
}

func (*I255String) union() {}

func TestIssue255(t *testing.T) {
	parser, err := participle.Build[I255Grammar](
		participle.Union[I255Union](&I255String{}),
	)
	assert.NoError(t, err)
	g, err := parser.ParseString("", `"Hello, World!"`)
	assert.NoError(t, err)
	assert.Equal(t, &I255Grammar{Union: &I255String{Value: `"Hello, World!"`}}, g)
}
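// Illustrative sketch (not part of the original tests): TestParserWithUnion
// above exercises participle.Union, which parses an interface-typed field by
// trying each registered member type in order. The names "Value", "Num",
// "Str" and "doc" below are hypothetical:
//
//	type Value interface{ value() }
//
//	type Num struct {
//		N float64 `@Int | @Float`
//	}
//	type Str struct {
//		S string `@String`
//	}
//
//	func (Num) value() {}
//	func (Str) value() {}
//
//	type doc struct {
//		V Value `@@`
//	}
//
//	parser := participle.MustBuild[doc](participle.Union[Value](Num{}, Str{}))
//	// parser.ParseString("", `42`) yields doc{V: Num{42}}; a quoted input
//	// would fall through to Str instead.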
func TestParseNumbers(t *testing.T) {
	type grammar struct {
		Int   int8    `@('-'? Int)`
		Uint  uint16  `@('-'? Int)`
		Float float64 `@Ident`
	}
	parser := participle.MustBuild[grammar]()
	_, err := parser.ParseString("", `300 0 x`)
	assert.EqualError(t, err, `1:1: failed to conform: strconv.ParseInt: parsing "300": value out of range`)
	_, err = parser.ParseString("", `-2 -2 x`)
	assert.EqualError(t, err, `1:4: failed to conform: strconv.ParseUint: parsing "-2": invalid syntax`)
	_, err = parser.ParseString("", `0 0 nope`)
	assert.EqualError(t, err, `1:5: failed to conform: strconv.ParseFloat: parsing "nope": invalid syntax`)
	result, err := parser.ParseString("", `-30 3000 Inf`)
	assert.NoError(t, err)
	assert.Equal(t, grammar{Int: -30, Uint: 3000, Float: math.Inf(1)}, *result)
}
golang-github-alecthomas-participle-v2-2.1.4/railroad.png000066400000000000000000002542271505300366400233610ustar00rootroot00000000000000[binary PNG image data omitted]
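// Illustrative sketch (not part of the repository): TestParseNumbers, the
// last test above, shows that captured text is converted to the destination
// field's kind via strconv, and that a failed conversion surfaces as a parse
// error at the offending token. A minimal reproduction, assuming the default
// lexer and the hypothetical type "tiny":
//
//	type tiny struct {
//		N int8 `@Int`
//	}
//
//	p := participle.MustBuild[tiny]()
//	_, err := p.ParseString("", "300")
//	// err: 1:1: failed to conform: strconv.ParseInt: parsing "300": value out of range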
@(З%;9-{l9{7;-_̹K8-䁒4P {]="n6ƶaQ?e󕸤, @Ȓk׮mQ6_t7]xXi^:ݖn[~[G'&~k~Ͽ,]tnZ= ':ŧE{vOe[BSp,EsVry/wC Й@_FtV>#u^WrWEڮ֩ K&E$RK|oIQ1ի"0uJw[0o(b=uVqA 01< zχ Zqi%%ya>Q0YLa*^vg'̵(n ^u{=^K( @) ߚZjaKI~!/Mx~sycJCtRusWBb&F}btXj$@ XE Ov??WQu㾊յ5U_eyg7M>bS+!=O<ݔmNyq$CCmk$nqݺYVjqOW'{jzVK_\F @YJuį(N)NDtJm=K>Iϳ׵^0o;vís.oОS `{'ҼL3ef[@I^@x̛%>Jz˧{{=:/Vcݤ_}%}ǾK9VK_TF @ Kj0`ŒoC'=2Mo9 /曤zd]o ?~zi܇NM>l;wb5:eӍYڿ6C?\NB5S~O¨mk&p!'#oG?j՝%pΉ)/܅NjM @eOé zcI4sn)ُWA'}vt+FQ:қJ=\:ϏK_>KǾ/K{P:N6V6;&MI={4-b^4=ƈ?3_u[bzϧ)mцnU8ێs\x߿)mf}Sio6u\o^M[]?/K/W2E "1CdH[b5ڹi fQ qA'mq D +ZӧO2E}F=Fۋi:y:ln靇3[[ƅv( @@}7eR|eJ7:.:1"֌F+\ץ/-]|%sٷq|ysg/Ȃ<S VEgkRIgy$PEצwxUvo}[cKyҠ<+^FK;)֜hn-o=::/I޵"}ǴEXX?}q\_v#:V*ފzP&/+.ݍAj[ErQugbVM7߳>m+B.6 @@Z?rW+{< q_i;ffe߾!hH;.Ol NW W?qIߧn35kMOG3(6hca;|W<ʻWȷ?SΗ6K_GmdrGȦAe#tjkIXOw}-R.s\FB;ſ=)?0yxкN#wzL;m5k/&|C-`|qn>ȶZ,X3b> @WȒZFg$OrumcpK<^t [O Ԧ4H eO;_[rq:~ikI/BO?>}ҴimN=0Yei{fgx-o=3Ԧ=|='Ԧ.vuz.ۼ]-0?jʥWfSl~iQӎB X7:(oGztLnL݉t'o=5z}1EߩXzjVM4Og?56͎?+^x~BŅ'괺xl[a⺹IȊ#b{)2G-qcWFcs @Ȓn@7Wj^­FDb;}gν Ďw[_G(;o􇲅K#P?eEmdZI|'w6~q1ϻ?=Y*ǬYegf̞Q2F6=#JT#%b JVN]?]UF~yLK>Q,v;Z7'{ig Mu<ʹlZܴtY?:hoHx|\>twJ'cҫ0 &S;lVF۶6U؅a?Sʶ{˝̊~TqVm*( @@,QF#23cv%qֵ8B㾊UL1ҦYdȐfyV%eNb|Xa,{iY͎t[XwM񾛕 sۊE}{/Y3mn6nӨʦAղ唏 @`bL9]ŠyI&-0x-st5)#@nࢅٺ6jfjm!_Y}@1Xr3xOmuJ];K@Q2jmL!@{[G](bELe)ӁqORi$mkoQ,)Ƌf;c<> @@2/ѹos#p{yg14zϷ=YqE@)a; @4޸:+jժQ1R"5`D5 #c;O%w @fI](>gcRS" @ _pj& ^'( + Ւ @@w,鮧 PR O=XjOE6mZH6 @=, @ @)`p֛R @ @=, @ @) X2 @ @@Kz @ @p  g)5 @ #:- @ 0%YoJM @H@GNK @ Ɍ "${base}.go" golang-github-alecthomas-participle-v2-2.1.4/struct.go000066400000000000000000000112511505300366400227170ustar00rootroot00000000000000package participle import ( "fmt" "reflect" "strconv" "strings" "text/scanner" "unicode/utf8" "github.com/alecthomas/participle/v2/lexer" ) // A structLexer lexes over the tags of struct fields while tracking the current field. type structLexer struct { s reflect.Type field int indexes [][]int lexer *lexer.PeekingLexer } func lexStruct(s reflect.Type) (*structLexer, error) { indexes, err := collectFieldIndexes(s) if err != nil { return nil, err } slex := &structLexer{ s: s, indexes: indexes, } if len(slex.indexes) > 0 { tag := fieldLexerTag(slex.Field().StructField) slex.lexer, err = lexer.Upgrade(newTagLexer(s.Name(), tag)) if err != nil { return nil, err } } return slex, nil } // NumField returns the number of fields in the struct associated with this structLexer. func (s *structLexer) NumField() int { return len(s.indexes) } type structLexerField struct { reflect.StructField Index []int } // Field returns the field associated with the current token. 
func (s *structLexer) Field() structLexerField {
	return s.GetField(s.field)
}

func (s *structLexer) GetField(field int) structLexerField {
	if field >= len(s.indexes) {
		field = len(s.indexes) - 1
	}
	return structLexerField{
		StructField: s.s.FieldByIndex(s.indexes[field]),
		Index:       s.indexes[field],
	}
}

func (s *structLexer) Peek() (*lexer.Token, error) {
	field := s.field
	lex := s.lexer
	for {
		token := lex.Peek()
		if !token.EOF() {
			token.Pos.Line = field + 1
			return token, nil
		}
		field++
		if field >= s.NumField() {
			t := lexer.EOFToken(token.Pos)
			return &t, nil
		}
		ft := s.GetField(field).StructField
		tag := fieldLexerTag(ft)
		var err error
		lex, err = lexer.Upgrade(newTagLexer(ft.Name, tag))
		if err != nil {
			return token, err
		}
	}
}

func (s *structLexer) Next() (*lexer.Token, error) {
	token := s.lexer.Next()
	if !token.EOF() {
		token.Pos.Line = s.field + 1
		return token, nil
	}
	if s.field+1 >= s.NumField() {
		t := lexer.EOFToken(token.Pos)
		return &t, nil
	}
	s.field++
	ft := s.Field().StructField
	tag := fieldLexerTag(ft)
	var err error
	s.lexer, err = lexer.Upgrade(newTagLexer(ft.Name, tag))
	if err != nil {
		return token, err
	}
	return s.Next()
}

func fieldLexerTag(field reflect.StructField) string {
	if tag, ok := field.Tag.Lookup("parser"); ok {
		return tag
	}
	return string(field.Tag)
}

// Recursively collect flattened indices for top-level fields and embedded fields.
func collectFieldIndexes(s reflect.Type) (out [][]int, err error) {
	if s.Kind() != reflect.Struct {
		return nil, fmt.Errorf("expected a struct but got %q", s)
	}
	defer decorate(&err, s.String)
	for i := 0; i < s.NumField(); i++ {
		f := s.Field(i)
		switch {
		case f.Anonymous && f.Type.Kind() == reflect.Struct: // Embedded struct.
			children, err := collectFieldIndexes(f.Type)
			if err != nil {
				return nil, err
			}
			for _, idx := range children {
				out = append(out, append(f.Index, idx...))
			}
		case f.PkgPath != "":
			continue
		case fieldLexerTag(f) != "":
			out = append(out, f.Index)
		}
	}
	return
}

// tagLexer is a Lexer based on text/scanner.Scanner
type tagLexer struct {
	scanner  *scanner.Scanner
	filename string
	err      error
}

func newTagLexer(filename string, tag string) *tagLexer {
	s := &scanner.Scanner{}
	s.Init(strings.NewReader(tag))
	lexer := &tagLexer{
		filename: filename,
		scanner:  s,
	}
	lexer.scanner.Error = func(s *scanner.Scanner, msg string) {
		// This is to support single quoted strings. Hacky.
		if !strings.HasSuffix(msg, "char literal") {
			lexer.err = fmt.Errorf("%s: %s", lexer.scanner.Pos(), msg)
		}
	}
	return lexer
}

func (t *tagLexer) Next() (lexer.Token, error) {
	typ := t.scanner.Scan()
	text := t.scanner.TokenText()
	pos := lexer.Position(t.scanner.Position)
	pos.Filename = t.filename
	if t.err != nil {
		return lexer.Token{}, t.err
	}
	return textScannerTransform(lexer.Token{
		Type:  lexer.TokenType(typ),
		Value: text,
		Pos:   pos,
	})
}

func textScannerTransform(token lexer.Token) (lexer.Token, error) {
	// Unquote strings.
	switch token.Type {
	case scanner.Char:
		// FIXME(alec): This is pretty hacky...we convert a single quoted char into a double
		// quoted string in order to support single quoted strings.
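		// For example (an illustrative note, not from the original comment):
		// a tag such as `@'hello' ','` lexes 'hello' as a text/scanner char
		// literal; rewriting it below to the double-quoted form "hello" lets
		// strconv.Unquote accept both quote styles, and any multi-rune result
		// is then retyped as scanner.String.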
		token.Value = fmt.Sprintf("\"%s\"", token.Value[1:len(token.Value)-1])
		fallthrough
	case scanner.String:
		s, err := strconv.Unquote(token.Value)
		if err != nil {
			return lexer.Token{}, Errorf(token.Pos, "%s: %q", err.Error(), token.Value)
		}
		token.Value = s
		if token.Type == scanner.Char && utf8.RuneCountInString(s) > 1 {
			token.Type = scanner.String
		}
	case scanner.RawString:
		token.Value = token.Value[1 : len(token.Value)-1]
	default:
	}
	return token, nil
}
golang-github-alecthomas-participle-v2-2.1.4/struct_test.go000066400000000000000000000037241505300366400237630ustar00rootroot00000000000000package participle

import (
	"reflect"
	"testing"
	"text/scanner"

	require "github.com/alecthomas/assert/v2"

	"github.com/alecthomas/participle/v2/lexer"
)

func TestStructLexerTokens(t *testing.T) {
	type testScanner struct {
		A string `12`
		B string `34`
	}

	scan, err := lexStruct(reflect.TypeOf(testScanner{}))
	require.NoError(t, err)
	t12 := lexer.Token{Type: scanner.Int, Value: "12", Pos: lexer.Position{Filename: "testScanner", Line: 1, Column: 1}}
	t34 := lexer.Token{Type: scanner.Int, Value: "34", Pos: lexer.Position{Filename: "B", Line: 2, Column: 1}}
	require.Equal(t, t12, *mustPeek(scan))
	require.Equal(t, 0, scan.field)
	require.Equal(t, t12, *mustNext(scan))
	require.Equal(t, t34, *mustPeek(scan))
	require.Equal(t, 0, scan.field)
	require.Equal(t, t34, *mustNext(scan))
	require.Equal(t, 1, scan.field)
	require.True(t, mustNext(scan).EOF())
}

func TestStructLexer(t *testing.T) {
	g := struct {
		A string `"a"|`
		B string `"b"`
	}{}
	gt := reflect.TypeOf(g)
	r, err := lexStruct(gt)
	require.NoError(t, err)
	f := []structLexerField{}
	s := ""
	for {
		_, err := r.Peek()
		require.NoError(t, err)
		rn, err := r.Next()
		require.NoError(t, err)
		if rn.EOF() {
			break
		}
		f = append(f, r.Field())
		s += rn.String()
	}
	require.Equal(t, `a|b`, s)
	f0 := r.GetField(0)
	f1 := r.GetField(1)
	require.Equal(t, []structLexerField{f0, f0, f1}, f)
}

type testEmbeddedIndexes struct {
	A string `@String`
	B string `@String`
}

func TestCollectFieldIndexes(t *testing.T) {
	var grammar struct {
		testEmbeddedIndexes
		C string `@String`
	}
	typ := reflect.TypeOf(grammar)
	indexes, err := collectFieldIndexes(typ)
	require.NoError(t, err)
	require.Equal(t, [][]int{{0, 0}, {0, 1}, {1}}, indexes)
}

func mustPeek(scan *structLexer) *lexer.Token {
	token, err := scan.Peek()
	if err != nil {
		panic(err)
	}
	return token
}

func mustNext(scan *structLexer) *lexer.Token { // nolint: interfacer
	token, err := scan.Next()
	if err != nil {
		panic(err)
	}
	return token
}
golang-github-alecthomas-participle-v2-2.1.4/validate.go000066400000000000000000000020631505300366400231650ustar00rootroot00000000000000package participle

import (
	"fmt"
	"strings"
)

// Perform some post-construction validation. This currently does:
//
// Checks for left recursion.
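// For example (an illustrative addition to this comment), a production whose
// alternative consumes itself before any terminal, as in the hypothetical
//
//	type expr struct {
//		Lit  string `  @Ident`
//		Next *expr  `| @@ "again"`
//	}
//
// would recurse forever at parse time, so Build rejects it up front with a
// "left recursion detected" error (exercised in validate_test.go below).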
func validate(n node) error {
	checked := map[*strct]bool{}
	seen := map[node]bool{}
	return visit(n, func(n node, next func() error) error {
		if n, ok := n.(*strct); ok {
			if !checked[n] && isLeftRecursive(n) {
				return fmt.Errorf("left recursion detected on\n\n%s", indent(n.String()))
			}
			checked[n] = true
			if seen[n] {
				return nil
			}
		}
		seen[n] = true
		return next()
	})
}

func isLeftRecursive(root *strct) (found bool) {
	defer func() { _ = recover() }()
	seen := map[node]bool{}
	_ = visit(root.expr, func(n node, next func() error) error {
		if found {
			return nil
		}
		switch n := n.(type) {
		case *strct:
			if root.typ == n.typ {
				found = true
			}
		case *sequence:
			if !n.head {
				panic("done")
			}
		}
		if seen[n] {
			return nil
		}
		seen[n] = true
		return next()
	})
	return
}

func indent(s string) string {
	return "  " + strings.Join(strings.Split(s, "\n"), "\n  ")
}
golang-github-alecthomas-participle-v2-2.1.4/validate_test.go000066400000000000000000000020521505300366400242220ustar00rootroot00000000000000package participle_test

import (
	"testing"

	require "github.com/alecthomas/assert/v2"

	"github.com/alecthomas/participle/v2"
)

type leftRecursionSimple struct {
	Begin string               ` @Ident`
	More  *leftRecursionSimple `| @@ "more"`
}

func TestValidateLeftRecursion(t *testing.T) {
	_, err := participle.Build[leftRecursionSimple]()
	require.Error(t, err)
	require.Equal(t, err.Error(), `left recursion detected on

  LeftRecursionSimple = <ident> | (LeftRecursionSimple "more") .`)
}

type leftRecursionNestedInner struct {
	Begin string               ` @Ident`
	Next  *leftRecursionNested `| @@`
}

type leftRecursionNested struct {
	Begin string                    ` @Ident`
	More  *leftRecursionNestedInner `| @@ "more"`
}

func TestValidateLeftRecursionNested(t *testing.T) {
	_, err := participle.Build[leftRecursionNested]()
	require.Error(t, err)
	require.Equal(t, err.Error(), `left recursion detected on

  LeftRecursionNested = <ident> | (LeftRecursionNestedInner "more") .
  LeftRecursionNestedInner = <ident> | LeftRecursionNested .`)
}
golang-github-alecthomas-participle-v2-2.1.4/visit.go000066400000000000000000000022321505300366400225300ustar00rootroot00000000000000package participle

import "fmt"

// Visit all nodes.
//
// Cycles are deliberately not detected, it is up to the visitor function to handle this.
func visit(n node, visitor func(n node, next func() error) error) error {
	return visitor(n, func() error {
		switch n := n.(type) {
		case *disjunction:
			for _, child := range n.nodes {
				if err := visit(child, visitor); err != nil {
					return err
				}
			}
			return nil
		case *strct:
			return visit(n.expr, visitor)
		case *custom:
			return nil
		case *union:
			for _, member := range n.disjunction.nodes {
				if err := visit(member, visitor); err != nil {
					return err
				}
			}
			return nil
		case *sequence:
			if err := visit(n.node, visitor); err != nil {
				return err
			}
			if n.next != nil {
				return visit(n.next, visitor)
			}
			return nil
		case *parseable:
			return nil
		case *capture:
			return visit(n.node, visitor)
		case *reference:
			return nil
		case *negation:
			return visit(n.node, visitor)
		case *literal:
			return nil
		case *group:
			return visit(n.expr, visitor)
		case *lookaheadGroup:
			return visit(n.expr, visitor)
		default:
			panic(fmt.Sprintf("%T", n))
		}
	})
}
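// Illustrative sketch (not part of this file): visit is how validate walks a
// grammar. A hypothetical pass that counts distinct nodes, guarding against
// cycles itself as the comment above requires, could look like:
//
//	func countNodes(root node) (int, error) {
//		seen := map[node]bool{}
//		count := 0
//		err := visit(root, func(n node, next func() error) error {
//			if seen[n] {
//				return nil // already visited; break the cycle here
//			}
//			seen[n] = true
//			count++
//			return next()
//		})
//		return count, err
//	}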