==> golang-github-alecthomas-participle-v2-2.1.4/.github/FUNDING.yml <==
github: [alecthomas]
==> golang-github-alecthomas-participle-v2-2.1.4/.github/workflows/ci.yml <==
on:
  push:
    branches:
      - master
  pull_request:
name: CI
jobs:
  test:
    name: Test
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Init Hermit
        run: ./bin/hermit env -r >> $GITHUB_ENV
      - name: Test Participle
        run: go test ./...
      - name: Test Examples
        run: cd ./_examples && go test ./...
  lint:
    name: Lint
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Init Hermit
        run: ./bin/hermit env -r >> $GITHUB_ENV
      - name: golangci-lint
        run: golangci-lint run
==> golang-github-alecthomas-participle-v2-2.1.4/.github/workflows/release.yml <==
name: Release
on:
  push:
    tags:
      - 'v*'
jobs:
  release:
    name: Release
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - run: ./bin/hermit env --raw >> $GITHUB_ENV
      - run: goreleaser release
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
==> golang-github-alecthomas-participle-v2-2.1.4/.golangci.yml <==
run:
  tests: true
  skip-dirs:
    - _examples
output:
  print-issued-lines: false
linters:
  enable-all: true
  disable:
    - maligned
    - lll
    - gocyclo
    - gochecknoglobals
    - wsl
    - whitespace
    - godox
    - funlen
    - gocognit
    - gomnd
    - goerr113
    - godot
    - nestif
    - testpackage
    - nolintlint
    - exhaustivestruct
    - wrapcheck
    - gci
    - gofumpt
    - gocritic
    - nlreturn
    - errorlint
    - nakedret
    - forbidigo
    - revive
    - cyclop
    - ifshort
    - paralleltest
    - interfacer
    - scopelint
    - golint
    - wastedassign
    - forcetypeassert
    - gomoddirectives
    - varnamelen
    - exhaustruct
    - ireturn
    - nonamedreturns
    - errname
    - nilnil
    - maintidx
    - unused # Does not work with type parameters
    - dupword
    - depguard
    - mnd
    - recvcheck
    - perfsprint
    - predeclared
linters-settings:
  govet:
    check-shadowing: true
  gocyclo:
    min-complexity: 10
  dupl:
    threshold: 100
  goconst:
    min-len: 8
    min-occurrences: 3
  exhaustive:
    default-signifies-exhaustive: true
issues:
  max-per-linter: 0
  max-same: 0
  exclude-use-default: false
  exclude:
    # Captured by errcheck.
    - '^(G104|G204|G307):'
    # Very commonly not checked.
    - 'Error return value of .(.*\.Help|.*\.MarkFlagRequired|(os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*printf?|os\.(Un)?Setenv). is not checked'
    - 'exported method `(.*\.MarshalJSON|.*\.UnmarshalJSON|.*\.EntityURN|.*\.GoString|.*\.Pos)` should have comment or be unexported'
    - 'uses unkeyed fields'
    - 'declaration of "err" shadows declaration'
    - 'bad syntax for struct tag key'
    - 'bad syntax for struct tag pair'
    - '^ST1012'
==> golang-github-alecthomas-participle-v2-2.1.4/.goreleaser.yml <==
project_name: participle
release:
  github:
    owner: alecthomas
    name: participle
brews:
  - install: bin.install "participle"
env:
  - CGO_ENABLED=0
builds:
  - goos:
      - linux
      - darwin
      - windows
    goarch:
      - arm64
      - amd64
      - "386"
    goarm:
      - "6"
    dir: ./cmd/participle
    main: .
    ldflags: -s -w -X main.version={{.Version}}
    binary: participle
archives:
  - format: tar.gz
    name_template: '{{ .Binary }}-{{ .Version }}-{{ .Os }}-{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}'
    files:
      - COPYING
      - README*
snapshot:
  name_template: SNAPSHOT-{{ .Commit }}
checksum:
  name_template: '{{ .ProjectName }}-{{ .Version }}-checksums.txt'
==> golang-github-alecthomas-participle-v2-2.1.4/CHANGES.md <==
- [v2](#v2)
## v2
v2 was released in November 2020. It contains the following changes, some of
which are backwards-incompatible:
- Added optional `LexString()` and `LexBytes()` methods that lexer
definitions can implement to fast-path lexing of bytes and strings.
- A new stateful lexer has been added.
- A `filename` must now be passed to all `Parse*()` and `Lex*()` methods.
- The `text/scanner` lexer no longer automatically unquotes strings or
  supports arbitrary-length single-quoted strings. The tokens it produces are
  identical to those of the `text/scanner` package. Use `Unquote()` to remove
  quotes.
- `Tok` and `EndTok` will no longer be populated.
- If a field named `Token []lexer.Token` exists it will be populated with the
raw tokens that the node parsed from the lexer.
- Support capturing directly into `lexer.Token` fields. eg.

      type ast struct {
          Head lexer.Token   `@Ident`
          Tail []lexer.Token `@(Ident*)`
      }
- Added an `experimental/codegen` package for stateful lexers. This provides a
  ~10x performance improvement with zero garbage when lexing strings.
- The `regex` lexer has been removed.
- The `ebnf` lexer has been removed.
- All future work on lexing will be put into the stateful lexer.
- The need for `DropToken` has been removed.
==> golang-github-alecthomas-participle-v2-2.1.4/COPYING <==
Copyright (C) 2017-2022 Alec Thomas
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==> golang-github-alecthomas-participle-v2-2.1.4/README.md <==
# A dead simple parser package for Go
[](https://pkg.go.dev/github.com/alecthomas/participle/v2) [](https://github.com/alecthomas/participle/actions)
[](https://goreportcard.com/report/github.com/alecthomas/participle/v2) [](https://gophers.slack.com/messages/CN9DS8YF3)
- [V2](#v2)
- [Introduction](#introduction)
- [Tutorial](#tutorial)
- [Tag syntax](#tag-syntax)
- [Overview](#overview)
- [Grammar syntax](#grammar-syntax)
- [Capturing](#capturing)
- [Capturing boolean value](#capturing-boolean-value)
- ["Union" types](#union-types)
- [Custom parsing](#custom-parsing)
- [Lexing](#lexing)
- [Stateful lexer](#stateful-lexer)
- [Example stateful lexer](#example-stateful-lexer)
- [Example simple/non-stateful lexer](#example-simplenon-stateful-lexer)
- [Experimental - code generation](#experimental---code-generation)
- [Options](#options)
- [Examples](#examples)
- [Performance](#performance)
- [Concurrency](#concurrency)
- [Error reporting](#error-reporting)
- [Comments](#comments)
- [Limitations](#limitations)
- [EBNF](#ebnf)
- [Syntax/Railroad Diagrams](#syntaxrailroad-diagrams)
## V2
This is version 2 of Participle.
It can be installed with:
```shell
$ go get github.com/alecthomas/participle/v2@latest
```
The latest version from v0 can be installed via:
```shell
$ go get github.com/alecthomas/participle@latest
```
## Introduction
The goal of this package is to provide a simple, idiomatic and elegant way of
defining parsers in Go.
Participle's method of defining grammars should be familiar to any Go
programmer who has used the `encoding/json` package: struct field tags define
what and how input is mapped to those same fields. This is not unusual for Go
encoders, but is unusual for a parser.
## Tutorial
A [tutorial](TUTORIAL.md) is available, walking through the creation of an .ini parser.
## Tag syntax
Participle supports two forms of struct tag grammar syntax.
The easiest to read is when the grammar uses the entire struct tag content, eg.
```go
Field string `@Ident @("," Ident)*`
```
However, this does not coexist well with other tags such as JSON, etc. and
may cause issues with linters. If this is an issue then you can use the
`parser:""` tag format. In this case single quotes can be used to quote
literals making the tags somewhat easier to write, eg.
```go
Field string `parser:"@Ident (',' Ident)*" json:"field"`
```
## Overview
A grammar is an annotated Go structure used to both define the parser grammar,
and be the AST output by the parser. As an example, following is the final INI
parser from the tutorial.
```go
type INI struct {
Properties []*Property `@@*`
Sections []*Section `@@*`
}
type Section struct {
Identifier string `"[" @Ident "]"`
Properties []*Property `@@*`
}
type Property struct {
Key string `@Ident "="`
Value *Value `@@`
}
type Value struct {
String *string ` @String`
Float *float64 `| @Float`
Int *int `| @Int`
}
```
> **Note:** Participle also supports named struct tags (eg. Hello string `parser:"@Ident"`).
A parser is constructed from a grammar and a lexer:
```go
parser, err := participle.Build[INI]()
```
Once constructed, the parser is applied to input to produce an AST:
```go
ast, err := parser.ParseString("", "size = 10")
// ast == &INI{
// Properties: []*Property{
// {Key: "size", Value: &Value{Int: &10}},
// },
// }
```
## Grammar syntax
Participle grammars are defined as tagged Go structures. Participle will
first look for tags in the form `parser:"..."`. It will then fall back to
using the entire tag body.
The grammar format is:
- `@<expr>` Capture expression into the field.
- `@@` Recursively capture using the field's own type.
- `<identifier>` Match named lexer token.
- `( ... )` Group.
- `"..."` or `'...'` Match the literal (note that the lexer must emit tokens matching this literal exactly).
- `"...":<identifier>` Match the literal, specifying the exact lexer token type to match.
- `<expr> <expr> ...` Match expressions.
- `<expr> | <expr> | ...` Match one of the alternatives. Each alternative is tried in order, with backtracking.
- `~<expr>` Match any token that is _not_ the start of the expression (eg: `@~";"` matches anything but the `;` character into the field).
- `(?= ... )` Positive lookahead group - requires the contents to match further input, without consuming it.
- `(?! ... )` Negative lookahead group - requires the contents not to match further input, without consuming it.
The following modifiers can be used after any expression:
- `*` Expression can match zero or more times.
- `+` Expression must match one or more times.
- `?` Expression can match zero or once.
- `!` Require a non-empty match (this is useful with a sequence of optional matches eg. `("a"? "b"? "c"?)!`).
Notes:
- Each struct is a single production, with each field applied in sequence.
- `@` is the mechanism for capturing matches into the field.
- If a struct field is not keyed with "parser", the entire struct tag
  will be used as the grammar fragment. This allows the grammar syntax to remain
  clear and simple to maintain.
## Capturing
Prefixing any expression in the grammar with `@` will capture matching values
for that expression into the corresponding field.
For example:
```go
// The grammar definition.
type Grammar struct {
Hello string `@Ident`
}
// The source text to parse.
source := "world"
// After parsing, the resulting AST.
result == &Grammar{
Hello: "world",
}
```
For slice and string fields, each instance of `@` will accumulate into the
field (including repeated patterns). Accumulation into other types is not
supported.
For integer and floating point types, a successful capture will be parsed
with `strconv.ParseInt()` and `strconv.ParseFloat()` respectively.
A successful capture match into a `bool` field will set the field to true.
Tokens can also be captured directly into fields of type `lexer.Token` and
`[]lexer.Token`.
Custom control of how values are captured into fields can be achieved by a
field type implementing the `Capture` interface (`Capture(values []string)
error`).
Additionally, any field implementing the `encoding.TextUnmarshaler` interface
will be capturable too. One caveat is that `UnmarshalText()` will be called once
for each captured token, so eg. `@(Ident Ident Ident)` will be called three times.
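For example, here is a minimal sketch of accumulating repeated captures through `encoding.TextUnmarshaler` (the `TagList` type and grammar fragment are illustrative, not part of the library):

```go
// TagList gains one entry per captured token, since UnmarshalText()
// is invoked once for each token matched by the capture.
type TagList []string

func (t *TagList) UnmarshalText(text []byte) error {
	*t = append(*t, string(text))
	return nil
}

type Decl struct {
	Tags TagList `@Ident*`
}
```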
### Capturing boolean value
By default, a boolean field is used to indicate that a match occurred, which
turns out to be much more useful and common in Participle than parsing true
or false literals. For example, parsing a variable declaration with a
trailing optional syntax:
```go
type Var struct {
Name string `"var" @Ident`
Type string `":" @Ident`
Optional bool `@"?"?`
}
```
In practice this gives more useful ASTs. If bool were to be parsed literally
then you'd need to have some alternate type for Optional such as string or a
custom type.
To capture literal boolean values such as `true` or `false`, implement the
Capture interface like so:
```go
type Boolean bool
func (b *Boolean) Capture(values []string) error {
*b = values[0] == "true"
return nil
}
type Value struct {
Float *float64 ` @Float`
Int *int `| @Int`
String *string `| @String`
Bool *Boolean `| @("true" | "false")`
}
```
## "Union" types
A very common pattern in parsers is "union" types, an example of which is
shown above in the `Value` type. A common way of expressing this in Go is via
a sealed interface, with each member of the union implementing this
interface.
eg. this is how the `Value` type could be expressed in this way:
```go
type Value interface { value() }
type Float struct { Value float64 `@Float` }
func (f Float) value() {}
type Int struct { Value int `@Int` }
func (f Int) value() {}
type String struct { Value string `@String` }
func (f String) value() {}
type Bool struct { Value Boolean `@("true" | "false")` }
func (f Bool) value() {}
```
Thanks to the efforts of [Jacob Ryan McCollum](https://github.com/mccolljr), Participle
now supports this pattern. Simply construct your parser with the `Union[T](member...T)`
option, eg.
```go
parser := participle.MustBuild[AST](participle.Union[Value](Float{}, Int{}, String{}, Bool{}))
```
Custom parsers may also be defined for union types with the [ParseTypeWith](https://pkg.go.dev/github.com/alecthomas/participle/v2#ParseTypeWith) option.
## Custom parsing
There are three ways of defining custom parsers for nodes in the grammar:
1. Implement the [Capture](https://pkg.go.dev/github.com/alecthomas/participle/v2#Capture) interface.
2. Implement the [Parseable](https://pkg.go.dev/github.com/alecthomas/participle/v2#Parseable) interface.
3. Use the [ParseTypeWith](https://pkg.go.dev/github.com/alecthomas/participle/v2#ParseTypeWith) option to specify a custom parser for union interface types.
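As a rough sketch of option 3 (assuming the v2 `lexer.PeekingLexer` API, where `Peek()` and `Next()` return `*lexer.Token`), a custom union parser can dispatch on the next token and return `participle.NextMatch` when nothing applies:

```go
parser := participle.MustBuild[AST](
	participle.ParseTypeWith[Value](func(pl *lexer.PeekingLexer) (Value, error) {
		tok := pl.Peek()
		switch tok.Value {
		case "true", "false":
			pl.Next() // Consume the token we just inspected.
			return Bool{Value: Boolean(tok.Value == "true")}, nil
		default:
			// Other members elided for brevity; signal "no match" so the
			// parser can try other alternatives.
			return nil, participle.NextMatch
		}
	}),
)
```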
## Lexing
Participle relies on distinct lexing and parsing phases. The lexer takes raw
bytes and produces tokens which the parser consumes. The parser transforms
these tokens into Go values.
The default lexer, if one is not explicitly configured, is based on the Go
`text/scanner` package and thus produces tokens for C/Go-like source code. This
is surprisingly useful, but if you do require more control over lexing the
included stateful [`participle/lexer`](#markdown-stateful-lexer) lexer should
cover most other cases. If that in turn is not flexible enough, you can
implement your own lexer.
Configure your parser with a lexer using the `participle.Lexer()` option.
To use your own Lexer you will need to implement two interfaces:
[Definition](https://pkg.go.dev/github.com/alecthomas/participle/v2/lexer#Definition)
(and optionally [StringsDefinition](https://pkg.go.dev/github.com/alecthomas/participle/v2/lexer#StringDefinition) and [BytesDefinition](https://pkg.go.dev/github.com/alecthomas/participle/v2/lexer#BytesDefinition)) and [Lexer](https://pkg.go.dev/github.com/alecthomas/participle/v2/lexer#Lexer).
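A minimal skeleton of such a lexer might look like the following. This is only a sketch against the v2 interfaces; the single `Char` token type and its `-2` type value are illustrative:

```go
type charDefinition struct{}

func (charDefinition) Symbols() map[string]lexer.TokenType {
	return map[string]lexer.TokenType{"EOF": lexer.EOF, "Char": -2}
}

func (charDefinition) Lex(filename string, r io.Reader) (lexer.Lexer, error) {
	data, err := io.ReadAll(r)
	if err != nil {
		return nil, err
	}
	return &charLexer{data: data}, nil
}

type charLexer struct {
	data []byte
	pos  int
}

// Next emits one single-character "Char" token at a time.
func (l *charLexer) Next() (lexer.Token, error) {
	if l.pos >= len(l.data) {
		return lexer.EOFToken(lexer.Position{Offset: l.pos}), nil
	}
	t := lexer.Token{Type: -2, Value: string(l.data[l.pos]), Pos: lexer.Position{Offset: l.pos}}
	l.pos++
	return t, nil
}
```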
### Stateful lexer
In addition to the default lexer, Participle includes an optional
stateful/modal lexer which provides powerful yet convenient
construction of most lexers. (Notably, indentation-based lexers cannot
be expressed using the `stateful` lexer -- for a discussion of how these
lexers can be implemented, see [#20](https://github.com/alecthomas/participle/issues/20)).
It is sometimes the case that a simple lexer cannot fully express the tokens
required by a parser. The canonical example of this is interpolated strings
within a larger language. eg.
```
let a = "hello ${name + ", ${last + "!"}"}"
```
This is impossible to tokenise with a normal lexer due to the arbitrarily
deep nesting of expressions. To support this case Participle's lexer is now
stateful by default.
The lexer is a state machine defined by a map of rules keyed by the state
name. Each rule within the state includes the name of the produced token, the
regex to match, and an optional operation to apply when the rule matches.
As a convenience, any `Rule` starting with a lowercase letter will be elided
from output, though it is recommended to use `participle.Elide()` instead, as it
better integrates with the parser.
Lexing starts in the `Root` group. Each rule is matched in order, with the first
successful match producing a lexeme. If the matching rule has an associated Action
it will be executed.
A state change can be introduced with the Action `Push(state)`. `Pop()` will
return to the previous state.
To reuse rules from another state, use `Include(state)`.
A special named rule `Return()` can also be used as the final rule in a state
to always return to the previous state.
As a special case, regexes containing backrefs in the form `\N` (where `N` is
a digit) will match the corresponding capture group from the immediate parent
group. This can be used to parse, among other things, heredocs. See the
[tests](https://github.com/alecthomas/participle/blob/master/lexer/stateful_test.go#L59)
for an example of this, among others.
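For instance, a condensed heredoc lexer along the lines of those tests might look like this (the rule names are illustrative):

```go
var heredocLexer = lexer.MustStateful(lexer.Rules{
	"Root": {
		{"Heredoc", `<<(\w+)`, lexer.Push("Heredoc")},
		{"Whitespace", `\s+`, nil},
		{"Ident", `\w+`, nil},
	},
	"Heredoc": {
		// `\1` matches whatever the parent rule captured as the delimiter.
		{"End", `\n\s*\1`, lexer.Pop()},
		{"EOL", `\n`, nil},
		{"Body", `[^\n]+`, nil},
	},
})
```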
### Example stateful lexer
Here's a cut down example of the string interpolation described above. Refer to
the [stateful example](https://github.com/alecthomas/participle/tree/master/_examples/stateful)
for the corresponding parser.
```go
var def = lexer.MustStateful(lexer.Rules{
	"Root": {
		{`String`, `"`, lexer.Push("String")},
	},
	"String": {
		{"Escaped", `\\.`, nil},
		{"StringEnd", `"`, lexer.Pop()},
		{"Expr", `\${`, lexer.Push("Expr")},
		{"Char", `[^$"\\]+`, nil},
	},
	"Expr": {
		lexer.Include("Root"),
		{`whitespace`, `\s+`, nil},
		{`Oper`, `[-+/*%]`, nil},
		{"Ident", `\w+`, nil},
		{"ExprEnd", `}`, lexer.Pop()},
	},
})
```
### Example simple/non-stateful lexer
Other than the default and stateful lexers, it's easy to define your
own _stateless_ lexer using the `lexer.MustSimple()` and
`lexer.NewSimple()` functions. These functions accept a slice of
`lexer.SimpleRule{}` objects consisting of a key and a regex-style pattern.
> **Note:** The stateful lexer replaces the old regex lexer.
For example, the lexer for a form of BASIC:
```go
var basicLexer = lexer.MustSimple([]lexer.SimpleRule{
{"Comment", `(?i)rem[^\n]*`},
{"String", `"(\\"|[^"])*"`},
{"Number", `[-+]?(\d*\.)?\d+`},
{"Ident", `[a-zA-Z_]\w*`},
{"Punct", `[-[!@#$%^&*()+_={}\|:;"'<,>.?/]|]`},
{"EOL", `[\n\r]+`},
{"whitespace", `[ \t]+`},
})
```
### Experimental - code generation
Participle v2 now has experimental support for generating code to perform
lexing.
This will generally provide around a 10x improvement in lexing performance
while producing O(1) garbage.
To use:
1. Serialize the `stateful` lexer definition to a JSON file (pass to `json.Marshal`).
2. Run the `participle` command (see `scripts/participle`) to generate go code from the lexer JSON definition. For example:
```
participle gen lexer [--name SomeCustomName] < mylexer.json | gofmt > mypackage/mylexer.go
```
(see `genLexer` in `conformance_test.go` for a more detailed example)
3. When constructing your parser, use the generated lexer for your lexer definition, such as:
```
var ParserDef = participle.MustBuild[someGrammar](participle.Lexer(mylexer.SomeCustomnameLexer))
```
Consider contributing to the tests in `conformance_test.go` if they do not
appear to cover the types of expressions you are using the generated
lexer with.
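For step 1, a minimal sketch (the file name is illustrative; the stateful definition marshals directly with `json.Marshal`):

```go
// Build the stateful definition as usual, then serialize it to JSON.
def := lexer.MustStateful(lexer.Rules{
	"Root": {
		{"Ident", `\w+`, nil},
		{"Whitespace", `\s+`, nil},
	},
})

data, err := json.Marshal(def)
if err != nil {
	panic(err)
}
if err := os.WriteFile("mylexer.json", data, 0o644); err != nil {
	panic(err)
}
```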
**Known limitations of the code generated lexer:**
* The lexer is always greedy. E.g., the regex `"[A-Z][A-Z][A-Z]?T"` will not match `"EST"` in the generated lexer, because the `?` operator is a greedy match and does not "give back" to try other possibilities. You can work around this by using `|` where you would otherwise rely on a non-greedy match; e.g., `"[A-Z][A-Z]|(?:[A-Z]T|T)"` produces correct results in both lexers (see [#276](https://github.com/alecthomas/participle/issues/276) for more detail). This limitation is what allows the generated lexer to be very fast and memory efficient.
* Backreferences in regular expressions are not currently supported
## Options
The Parser's behaviour can be configured via [Options](https://pkg.go.dev/github.com/alecthomas/participle/v2#Option).
## Examples
There are several [examples included](https://github.com/alecthomas/participle/tree/master/_examples),
some of which are linked directly here. These examples should be run from the
`_examples` subdirectory within a cloned copy of this repository.
Example | Description
--------|---------------
[BASIC](https://github.com/alecthomas/participle/tree/master/_examples/basic) | A lexer, parser and interpreter for a [rudimentary dialect](https://caml.inria.fr/pub/docs/oreilly-book/html/book-ora058.html) of BASIC.
[EBNF](https://github.com/alecthomas/participle/tree/master/_examples/ebnf) | Parser for the form of EBNF used by Go.
[Expr](https://github.com/alecthomas/participle/tree/master/_examples/expr) | A basic mathematical expression parser and evaluator.
[GraphQL](https://github.com/alecthomas/participle/tree/master/_examples/graphql) | Lexer+parser for GraphQL schemas
[HCL](https://github.com/alecthomas/participle/tree/master/_examples/hcl) | A parser for the [HashiCorp Configuration Language](https://github.com/hashicorp/hcl).
[INI](https://github.com/alecthomas/participle/tree/master/_examples/ini) | An INI file parser.
[Protobuf](https://github.com/alecthomas/participle/tree/master/_examples/protobuf) | A full [Protobuf](https://developers.google.com/protocol-buffers/) version 2 and 3 parser.
[SQL](https://github.com/alecthomas/participle/tree/master/_examples/sql) | A *very* rudimentary SQL SELECT parser.
[Stateful](https://github.com/alecthomas/participle/tree/master/_examples/stateful) | A basic example of a stateful lexer and corresponding parser.
[Thrift](https://github.com/alecthomas/participle/tree/master/_examples/thrift) | A full [Thrift](https://thrift.apache.org/docs/idl) parser.
[TOML](https://github.com/alecthomas/participle/tree/master/_examples/toml) | A [TOML](https://github.com/toml-lang/toml) parser.
Included below is a full GraphQL lexer and parser:
```go
package main
import (
"fmt"
"os"
"github.com/alecthomas/kong"
"github.com/alecthomas/repr"
"github.com/alecthomas/participle/v2"
"github.com/alecthomas/participle/v2/lexer"
)
type File struct {
Entries []*Entry `@@*`
}
type Entry struct {
Type *Type ` @@`
Schema *Schema `| @@`
Enum *Enum `| @@`
Scalar string `| "scalar" @Ident`
}
type Enum struct {
Name string `"enum" @Ident`
Cases []string `"{" @Ident* "}"`
}
type Schema struct {
Fields []*Field `"schema" "{" @@* "}"`
}
type Type struct {
Name string `"type" @Ident`
Implements string `( "implements" @Ident )?`
Fields []*Field `"{" @@* "}"`
}
type Field struct {
Name string `@Ident`
Arguments []*Argument `( "(" ( @@ ( "," @@ )* )? ")" )?`
Type *TypeRef `":" @@`
Annotation string `( "@" @Ident )?`
}
type Argument struct {
Name string `@Ident`
Type *TypeRef `":" @@`
Default *Value `( "=" @@ )`
}
type TypeRef struct {
Array *TypeRef `( "[" @@ "]"`
Type string ` | @Ident )`
NonNullable bool `( @"!" )?`
}
type Value struct {
Symbol string `@Ident`
}
var (
graphQLLexer = lexer.MustSimple([]lexer.SimpleRule{
{"Comment", `(?:#|//)[^\n]*\n?`},
{"Ident", `[a-zA-Z]\w*`},
{"Number", `(?:\d*\.)?\d+`},
{"Punct", `[-[!@#$%^&*()+_={}\|:;"'<,>.?/]|]`},
{"Whitespace", `[ \t\n\r]+`},
})
parser = participle.MustBuild[File](
participle.Lexer(graphQLLexer),
participle.Elide("Comment", "Whitespace"),
participle.UseLookahead(2),
)
)
var cli struct {
	EBNF  bool     `help:"Dump EBNF."`
	Files []string `arg:"" optional:"" type:"existingfile" help:"GraphQL schema files to parse."`
}
func main() {
ctx := kong.Parse(&cli)
if cli.EBNF {
fmt.Println(parser.String())
ctx.Exit(0)
}
for _, file := range cli.Files {
r, err := os.Open(file)
ctx.FatalIfErrorf(err)
ast, err := parser.Parse(file, r)
r.Close()
repr.Println(ast)
ctx.FatalIfErrorf(err)
}
}
```
## Performance
One of the included examples is a complete Thrift parser
(shell-style comments are not supported). This gives
a convenient baseline for comparing to the PEG based
[pigeon](https://github.com/PuerkitoBio/pigeon), which is the parser used by
[go-thrift](https://github.com/samuel/go-thrift). Additionally, the pigeon
parser is utilising a generated parser, while the participle parser is built at
run time.
You can run the benchmarks yourself, but here's the output on my machine:
BenchmarkParticipleThrift-12 5941 201242 ns/op 178088 B/op 2390 allocs/op
BenchmarkGoThriftParser-12 3196 379226 ns/op 157560 B/op 2644 allocs/op
On a real life codebase of 47K lines of Thrift, Participle takes 200ms and
go-thrift takes 630ms, which aligns quite closely with the benchmarks.
## Concurrency
A compiled `Parser` instance can be used concurrently. A `LexerDefinition` can be used concurrently. A `Lexer` instance cannot be used concurrently.
## Error reporting
There are a few areas where Participle can provide useful feedback to users of your parser.
1. Errors returned by [Parser.Parse*()](https://pkg.go.dev/github.com/alecthomas/participle/v2#Parser.Parse) will be:
   1. Of type [Error](https://pkg.go.dev/github.com/alecthomas/participle/v2#Error), which contains positional information where available.
   2. Either a [ParseError](https://pkg.go.dev/github.com/alecthomas/participle/v2#ParseError) or a [lexer.Error](https://pkg.go.dev/github.com/alecthomas/participle/v2/lexer#Error).
2. Participle will make a best effort to return as much of the AST up to the error location as possible.
3. Any node in the AST containing a field `Pos lexer.Position` [^1] will be automatically
populated from the nearest matching token.
4. Any node in the AST containing a field `EndPos lexer.Position` [^1] will be
automatically populated from the token at the end of the node.
5. Any node in the AST containing a field `Tokens []lexer.Token` will be automatically
populated with _all_ tokens captured by the node, _including_ elided tokens.
[^1]: Either the concrete type or a type convertible to it, allowing user defined types to be used.
These related pieces of information can be combined to provide fairly comprehensive error reporting.
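For example, a node annotated with positional and token fields, plus a sketch of inspecting a returned error through the `Error` interface (the `Item` type and `report` helper are illustrative):

```go
type Item struct {
	Pos    lexer.Position // Populated from the nearest matching token.
	EndPos lexer.Position // Populated from the token at the end of the node.
	Tokens []lexer.Token  // All tokens captured by the node, including elided ones.

	Name string `@Ident`
}

func report(err error) {
	var perr participle.Error
	if errors.As(err, &perr) {
		fmt.Printf("%s: %s\n", perr.Position(), perr.Message())
	}
}
```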
## Comments
Comments can be difficult to capture as in most languages they may appear almost
anywhere. There are three ways of capturing comments, with decreasing fidelity.
The first is to elide tokens in the parser, then add `Tokens []lexer.Token` as a
field to each AST node. Comments will be included. This has the downside that
there's no straightforward way to know where the comments are relative to
non-comment tokens in that node.
The second way is to _not_ elide comment tokens, and explicitly capture them at
every location in the AST where they might occur. This has the downside that
unless you place these captures in every possible valid location, users might
insert valid comments that then fail to parse.
The third way is to elide comment tokens and capture them where they're
semantically meaningful, such as for documentation comments. Participle supports
explicitly matching elided tokens for this purpose.
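For example, with `Comment` elided via `participle.Elide("Comment")`, a grammar can still name the elided token where documentation comments are semantically meaningful (a sketch; the `Declaration` type is illustrative):

```go
type Declaration struct {
	DocComments []string `@Comment*` // Explicitly captures otherwise-elided comments.
	Name        string   `"type" @Ident`
}
```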
## Limitations
Internally, Participle is a recursive descent parser with backtracking (see
`UseLookahead(K)`).
Among other things, this means that Participle grammars do not support left
recursion. Left recursion must be eliminated by restructuring your grammar.
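For example, the left-recursive rule `Expr = Expr ("+" | "-") Term | Term` can be restructured into repetition, in the same style as the included expression example:

```go
type Expr struct {
	Left  *Term     `@@`
	Right []*OpTerm `@@*`
}

type OpTerm struct {
	Op   string `@("+" | "-")`
	Term *Term  `@@`
}
```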
## EBNF
The old `EBNF` lexer was removed in a major refactoring at
[362b26](https://github.com/alecthomas/participle/commit/362b26640fa3dc406aa60960f7d9a5b9a909414e)
-- if you have an EBNF grammar you need to implement, you can either translate
it into regex-style `lexer.Rule{}` syntax or implement your own EBNF lexer --
you might be able to use [the old EBNF lexer](https://github.com/alecthomas/participle/blob/2403858c8b2068b4b0cf96a6b36dd7069674039b/lexer/ebnf/ebnf.go)
as a starting point.
Participle supports outputting an EBNF grammar from a Participle parser. Once
the parser is constructed simply call `String()`.
Participle also [includes a parser](https://pkg.go.dev/github.com/alecthomas/participle/v2/ebnf) for this form of EBNF (naturally).
eg. The [GraphQL example](https://github.com/alecthomas/participle/blob/master/_examples/graphql/main.go#L15-L62)
produces the following EBNF:
```ebnf
File = Entry* .
Entry = Type | Schema | Enum | "scalar" ident .
Type = "type" ident ("implements" ident)? "{" Field* "}" .
Field = ident ("(" (Argument ("," Argument)*)? ")")? ":" TypeRef ("@" ident)? .
Argument = ident ":" TypeRef ("=" Value)? .
TypeRef = "[" TypeRef "]" | ident "!"? .
Value = ident .
Schema = "schema" "{" Field* "}" .
Enum = "enum" ident "{" ident* "}" .
```
## Syntax/Railroad Diagrams
Participle includes a [command-line utility](https://github.com/alecthomas/participle/tree/master/cmd/railroad) to take an EBNF representation of a Participle grammar
(as returned by `Parser.String()`) and produce a Railroad Diagram using
[tabatkins/railroad-diagrams](https://github.com/tabatkins/railroad-diagrams).
Here's what the GraphQL grammar looks like:

==> golang-github-alecthomas-participle-v2-2.1.4/TUTORIAL.md <==
# Participle parser tutorial
- [Introduction](#introduction)
- [The complete grammar](#the-complete-grammar)
- [Root of the .ini AST (structure, fields)](#root-of-the-ini-ast-structure-fields)
- [.ini properties (named tokens, capturing, literals)](#ini-properties-named-tokens-capturing-literals)
- [.ini property values (alternates, recursive structs, sequences)](#ini-property-values-alternates-recursive-structs-sequences)
- [Complete, but limited, .ini grammar (top-level properties only)](#complete-but-limited-ini-grammar-top-level-properties-only)
- [Extending our grammar to support sections](#extending-our-grammar-to-support-sections)
- [(Optional) Source positional information](#optional-source-positional-information)
- [Parsing using our grammar](#parsing-using-our-grammar)
## Introduction
Writing a parser in Participle typically involves starting from the "root" of
the AST, annotating fields with the grammar, then recursively expanding until
it is complete. The AST is expressed via Go data types and the grammar is
expressed through struct field tags, as a form of EBNF.
The parser we're going to create for this tutorial parses .ini files
like this:
```ini
age = 21
name = "Bob Smith"
[address]
city = "Beverly Hills"
postal_code = 90210
```
## The complete grammar
I think it's useful to see the complete grammar first, to see what we're
working towards. Read on below for details.
```go
type INI struct {
Properties []*Property `@@*`
Sections []*Section `@@*`
}
type Section struct {
Identifier string `"[" @Ident "]"`
Properties []*Property `@@*`
}
type Property struct {
Key string `@Ident "="`
Value Value `@@`
}
type Value interface{ value() }
type String struct {
String string `@String`
}
func (String) value() {}
type Number struct {
Number float64 `@Float | @Int`
}
func (Number) value() {}
```
## Root of the .ini AST (structure, fields)
The first step is to create a root struct for our grammar. In the case of our
.ini parser, this struct will contain a sequence of properties:
```go
type INI struct {
Properties []*Property
}
type Property struct {
}
```
## .ini properties (named tokens, capturing, literals)
Each property in an .ini file has an identifier key:
```go
type Property struct {
Key string
}
```
The default lexer tokenises Go source code, and includes an `Ident` token type
that matches identifiers. To match this token we simply use the token type
name:
```go
type Property struct {
Key string `Ident`
}
```
This will *match* identifiers, but not *capture* them into the `Key` field. To
capture input tokens into AST fields, prefix any grammar node with `@`:
```go
type Property struct {
Key string `@Ident`
}
```
In .ini files, each key is separated from its value with a literal `=`. To
match a literal, enclose the literal in double quotes:
```go
type Property struct {
Key string `@Ident "="`
}
```
> Note: literals in the grammar must match tokens from the lexer *exactly*. In
> this example if the lexer does not output `=` as a distinct token the
> grammar will not match.
## .ini property values (alternates, recursive structs, sequences)
For the purposes of our example we are only going to support quoted string
and numeric property values. As each value can be *either* a string or a float
we'll need something akin to a sum type. Participle supports this via the
`Union[T any](members...T) Option` parser option. This tells the parser that
when a field of interface type `T` is encountered, it should try to match each
of the `members` in turn, and return the first successful match.
```go
type Value interface{ value() }
type String struct {
String string `@String`
}
func (String) value() {}
type Number struct {
Number float64 `@Float`
}
func (Number) value() {}
```
Since we want to also parse integers and the default lexer differentiates
between floats and integers, we need to explicitly match either. To express
matching a set of alternatives such as this, we use the `|` operator:
```go
type Number struct {
Number float64 `@Float | @Int`
}
```
> Note: the grammar can cross fields.
Next, we'll match values and capture them into the `Property`. To recursively
capture structs use `@@` (capture self):
```go
type Property struct {
Key string `@Ident "="`
Value Value `@@`
}
```
Now that we can parse a `Property` we need to go back to the root of the
grammar. We want to parse 0 or more properties. To do this, we use `*`.
Participle will accumulate each match into the slice until matching fails,
then move to the next node in the grammar.
```go
type INI struct {
Properties []*Property `@@*`
}
```
> Note: tokens can also be accumulated into strings, appending each match.
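For instance, a fragment like this (illustrative, not part of our .ini grammar) concatenates every capture into one string, so parsing `a.b.c` yields `"a.b.c"`:

```go
type DottedPath struct {
	Path string `@Ident ( @"." @Ident )*`
}
```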
## Complete, but limited, .ini grammar (top-level properties only)
We now have a functional, but limited, .ini parser!
```go
type INI struct {
Properties []*Property `@@*`
}
type Property struct {
Key string `@Ident "="`
Value Value `@@`
}
type Value interface{ value() }
type String struct {
String string `@String`
}
func (String) value() {}
type Number struct {
Number float64 `@Float | @Int`
}
func (Number) value() {}
```
## Extending our grammar to support sections
Adding support for sections is simply a matter of utilising the constructs
we've just learnt. A section consists of a header identifier, and a sequence
of properties:
```go
type Section struct {
Identifier string `"[" @Ident "]"`
Properties []*Property `@@*`
}
```
Simple!
Now we just add a sequence of `Section`s to our root node:
```go
type INI struct {
Properties []*Property `@@*`
Sections []*Section `@@*`
}
```
And we're done!
## (Optional) Source positional information
If a grammar node includes a field with the name `Pos` and type `lexer.Position`, it will be automatically populated by positional information. eg.
```go
type String struct {
Pos lexer.Position
String string `@String`
}
type Number struct {
Pos lexer.Position
Number float64 `@Float | @Int`
}
```
This is useful for error reporting.
## Parsing using our grammar
To parse with this grammar we first construct the parser (we'll use the
default lexer for now):
```go
parser, err := participle.Build[INI](
participle.Unquote("String"),
participle.Union[Value](String{}, Number{}),
)
```
Then parse a new INI file with `parser.Parse{,String,Bytes}()`:
```go
ini, err := parser.ParseString("", `
age = 21
name = "Bob Smith"
[address]
city = "Beverly Hills"
postal_code = 90210
`)
```
You can find the full example [here](_examples/ini/main.go), alongside
other examples including an SQL `SELECT` parser and a full
[Thrift](https://thrift.apache.org/) parser.
==> golang-github-alecthomas-participle-v2-2.1.4/_examples/basic/ast.go <==
// nolint: golint
package main
import (
"io"
"strings"
"github.com/alecthomas/participle/v2/lexer"
)
// Parse a BASIC program.
func Parse(r io.Reader) (*Program, error) {
program, err := basicParser.Parse("", r)
if err != nil {
return nil, err
}
program.init()
return program, nil
}
type Program struct {
Pos lexer.Position
Commands []*Command `@@*`
Table map[int]*Command
}
type Command struct {
Pos lexer.Position
Index int
Line int `@Number`
Remark *Remark `( @@`
Input *Input ` | @@`
Let *Let ` | @@`
Goto *Goto ` | @@`
If *If ` | @@`
Print *Print ` | @@`
Call *Call ` | @@ ) EOL`
}
type Remark struct {
Pos lexer.Position
Comment string `@Comment`
}
type Call struct {
Pos lexer.Position
Name string `@Ident`
Args []*Expression `"(" ( @@ ( "," @@ )* )? ")"`
}
type Print struct {
Pos lexer.Position
Expression *Expression `"PRINT" @@`
}
type Input struct {
Pos lexer.Position
Variable string `"INPUT" @Ident`
}
type Let struct {
Pos lexer.Position
Variable string `"LET" @Ident`
Value *Expression `"=" @@`
}
type Goto struct {
Pos lexer.Position
Line int `"GOTO" @Number`
}
type If struct {
Pos lexer.Position
Condition *Expression `"IF" @@`
Line int `"THEN" @Number`
}
type Operator string
func (o *Operator) Capture(s []string) error {
*o = Operator(strings.Join(s, ""))
return nil
}
type Value struct {
Pos lexer.Position
Number *float64 ` @Number`
Variable *string `| @Ident`
String *string `| @String`
Call *Call `| @@`
Subexpression *Expression `| "(" @@ ")"`
}
type Factor struct {
Pos lexer.Position
Base *Value `@@`
Exponent *Value `( "^" @@ )?`
}
type OpFactor struct {
Pos lexer.Position
Operator Operator `@("*" | "/")`
Factor *Factor `@@`
}
type Term struct {
Pos lexer.Position
Left *Factor `@@`
Right []*OpFactor `@@*`
}
type OpTerm struct {
Pos lexer.Position
Operator Operator `@("+" | "-")`
Term *Term `@@`
}
type Cmp struct {
Pos lexer.Position
Left *Term `@@`
Right []*OpTerm `@@*`
}
type OpCmp struct {
Pos lexer.Position
Operator Operator `@("=" | "<" "=" | ">" "=" | "<" | ">" | "!" "=")`
Cmp *Cmp `@@`
}
type Expression struct {
Pos lexer.Position
Left *Cmp `@@`
Right []*OpCmp `@@*`
}
==> golang-github-alecthomas-participle-v2-2.1.4/_examples/basic/eval.go <==
// nolint: golint, dupl
package main
import (
"fmt"
"io"
"math"
"github.com/alecthomas/repr"
"github.com/alecthomas/participle/v2"
)
type Evaluatable interface {
Evaluate(ctx *Context) (interface{}, error)
}
type Function func(args ...interface{}) (interface{}, error)
// Context for evaluation.
type Context struct {
// User-provided functions.
Functions map[string]Function
// Vars defined during evaluation.
Vars map[string]interface{}
// Reader from which INPUT is read.
Input io.Reader
// Writer where PRINTing will write.
Output io.Writer
}
func (p *Program) init() {
p.Table = map[int]*Command{}
for index, cmd := range p.Commands {
cmd.Index = index
p.Table[cmd.Line] = cmd
}
}
func (v *Value) Evaluate(ctx *Context) (interface{}, error) {
switch {
case v.Number != nil:
return *v.Number, nil
case v.String != nil:
return *v.String, nil
case v.Variable != nil:
value, ok := ctx.Vars[*v.Variable]
if !ok {
return nil, fmt.Errorf("unknown variable %q", *v.Variable)
}
return value, nil
case v.Subexpression != nil:
return v.Subexpression.Evaluate(ctx)
case v.Call != nil:
return v.Call.Evaluate(ctx)
}
panic("unsupported value type " + repr.String(v))
}
func (f *Factor) Evaluate(ctx *Context) (interface{}, error) {
base, err := f.Base.Evaluate(ctx)
if err != nil {
return nil, err
}
if f.Exponent == nil {
return base, nil
}
baseNum, exponentNum, err := evaluateFloats(ctx, base, f.Exponent)
if err != nil {
return nil, participle.Errorf(f.Pos, "invalid factor: %s", err)
}
return math.Pow(baseNum, exponentNum), nil
}
func (o *OpFactor) Evaluate(ctx *Context, lhs interface{}) (interface{}, error) {
lhsNumber, rhsNumber, err := evaluateFloats(ctx, lhs, o.Factor)
if err != nil {
return nil, participle.Errorf(o.Pos, "invalid arguments for %s: %s", o.Operator, err)
}
switch o.Operator {
case "*":
return lhsNumber * rhsNumber, nil
case "/":
return lhsNumber / rhsNumber, nil
}
panic("unreachable")
}
func (t *Term) Evaluate(ctx *Context) (interface{}, error) {
lhs, err := t.Left.Evaluate(ctx)
if err != nil {
return nil, err
}
for _, right := range t.Right {
rhs, err := right.Evaluate(ctx, lhs)
if err != nil {
return nil, err
}
lhs = rhs
}
return lhs, nil
}
func (o *OpTerm) Evaluate(ctx *Context, lhs interface{}) (interface{}, error) {
lhsNumber, rhsNumber, err := evaluateFloats(ctx, lhs, o.Term)
if err != nil {
return nil, participle.Errorf(o.Pos, "invalid arguments for %s: %s", o.Operator, err)
}
switch o.Operator {
case "+":
return lhsNumber + rhsNumber, nil
case "-":
return lhsNumber - rhsNumber, nil
}
panic("unreachable")
}
func (c *Cmp) Evaluate(ctx *Context) (interface{}, error) {
lhs, err := c.Left.Evaluate(ctx)
if err != nil {
return nil, err
}
for _, right := range c.Right {
rhs, err := right.Evaluate(ctx, lhs)
if err != nil {
return nil, err
}
lhs = rhs
}
return lhs, nil
}
func (o *OpCmp) Evaluate(ctx *Context, lhs interface{}) (interface{}, error) {
rhs, err := o.Cmp.Evaluate(ctx)
if err != nil {
return nil, err
}
switch lhs := lhs.(type) {
case float64:
rhs, ok := rhs.(float64)
if !ok {
return nil, participle.Errorf(o.Pos, "rhs of %s must be a number", o.Operator)
}
switch o.Operator {
case "=":
return lhs == rhs, nil
case "!=":
return lhs != rhs, nil
case "<":
return lhs < rhs, nil
case ">":
return lhs > rhs, nil
case "<=":
return lhs <= rhs, nil
case ">=":
return lhs >= rhs, nil
}
case string:
rhs, ok := rhs.(string)
if !ok {
return nil, participle.Errorf(o.Pos, "rhs of %s must be a string", o.Operator)
}
switch o.Operator {
case "=":
return lhs == rhs, nil
case "!=":
return lhs != rhs, nil
case "<":
return lhs < rhs, nil
case ">":
return lhs > rhs, nil
case "<=":
return lhs <= rhs, nil
case ">=":
return lhs >= rhs, nil
}
default:
return nil, participle.Errorf(o.Pos, "lhs of %s must be a number or string", o.Operator)
}
panic("unreachable")
}
func (e *Expression) Evaluate(ctx *Context) (interface{}, error) {
lhs, err := e.Left.Evaluate(ctx)
if err != nil {
return nil, err
}
for _, right := range e.Right {
rhs, err := right.Evaluate(ctx, lhs)
if err != nil {
return nil, err
}
lhs = rhs
}
return lhs, nil
}
func (c *Call) Evaluate(ctx *Context) (interface{}, error) {
function, ok := ctx.Functions[c.Name]
if !ok {
return nil, participle.Errorf(c.Pos, "unknown function %q", c.Name)
}
args := []interface{}{}
for _, arg := range c.Args {
value, err := arg.Evaluate(ctx)
if err != nil {
return nil, err
}
args = append(args, value)
}
value, err := function(args...)
if err != nil {
return nil, participle.Errorf(c.Pos, "call to %s() failed", c.Name)
}
return value, nil
}
func (p *Program) Evaluate(r io.Reader, w io.Writer, functions map[string]Function) error {
if len(p.Commands) == 0 {
return nil
}
ctx := &Context{
Vars: map[string]interface{}{},
Functions: functions,
Input: r,
Output: w,
}
for index := 0; index < len(p.Commands); {
cmd := p.Commands[index]
switch {
case cmd.Goto != nil:
cmd := cmd.Goto
next, ok := p.Table[cmd.Line]
if !ok {
return participle.Errorf(cmd.Pos, "invalid line number %d", cmd.Line)
}
index = next.Index
continue
case cmd.Remark != nil:
case cmd.Let != nil:
cmd := cmd.Let
value, err := cmd.Value.Evaluate(ctx)
if err != nil {
return err
}
ctx.Vars[cmd.Variable] = value
case cmd.Print != nil:
cmd := cmd.Print
value, err := cmd.Expression.Evaluate(ctx)
if err != nil {
return err
}
fmt.Fprintln(ctx.Output, value)
case cmd.Input != nil:
cmd := cmd.Input
var value float64
_, err := fmt.Fscanln(ctx.Input, &value)
if err != nil {
return participle.Errorf(cmd.Pos, "invalid input: %s", err)
}
ctx.Vars[cmd.Variable] = value
case cmd.If != nil:
cmd := cmd.If
condition, err := cmd.Condition.Evaluate(ctx)
if err != nil {
return err
}
if test, ok := condition.(bool); ok && test {
next, ok := p.Table[cmd.Line]
if !ok {
return participle.Errorf(cmd.Pos, "invalid line number %d", cmd.Line)
}
index = next.Index
continue
}
case cmd.Call != nil:
_, err := cmd.Call.Evaluate(ctx)
if err != nil {
return err
}
default:
panic("unsupported command " + repr.String(cmd))
}
index++
}
return nil
}
func evaluateFloats(ctx *Context, lhs interface{}, rhsExpr Evaluatable) (float64, float64, error) {
rhs, err := rhsExpr.Evaluate(ctx)
if err != nil {
return 0, 0, err
}
lhsNumber, ok := lhs.(float64)
if !ok {
return 0, 0, fmt.Errorf("lhs must be a number")
}
rhsNumber, ok := rhs.(float64)
if !ok {
return 0, 0, fmt.Errorf("rhs must be a number")
}
return lhsNumber, rhsNumber, nil
}
==> golang-github-alecthomas-participle-v2-2.1.4/_examples/basic/example.bas <==
5 REM inputting the argument
10 PRINT "Factorial of:"
20 INPUT A
30 LET B = 1
35 REM beginning of the loop
40 IF A <= 1 THEN 80
50 LET B = B * A
60 LET A = A - 1
70 GOTO 40
75 REM prints the result
80 PRINT B
==> golang-github-alecthomas-participle-v2-2.1.4/_examples/basic/hidden.bas <==
10 PRINT "Give the hidden number: "
20 INPUT N
30 PRINT "Give a number: "
40 INPUT R
50 IF R = N THEN 110
60 IF R < N THEN 90
70 PRINT "C-"
80 GOTO 30
90 PRINT "C+"
100 GOTO 30
110 PRINT "CONGRATULATIONS"
==> golang-github-alecthomas-participle-v2-2.1.4/_examples/basic/main.go <==
// nolint: golint, dupl
package main
import (
"os"
"github.com/alecthomas/kong"
"github.com/alecthomas/participle/v2"
"github.com/alecthomas/participle/v2/lexer"
)
var (
basicLexer = lexer.MustSimple([]lexer.SimpleRule{
{"Comment", `(?i)rem[^\n]*`},
{"String", `"(\\"|[^"])*"`},
{"Number", `[-+]?(\d*\.)?\d+`},
{"Ident", `[a-zA-Z_]\w*`},
{"Punct", `[-[!@#$%^&*()+_={}\|:;"'<,>.?/]|]`},
{"EOL", `[\n\r]+`},
{"whitespace", `[ \t]+`},
})
basicParser = participle.MustBuild[Program](
participle.Lexer(basicLexer),
participle.CaseInsensitive("Ident"),
participle.Unquote("String"),
participle.UseLookahead(2),
)
cli struct {
File string `arg:"" type:"existingfile" help:"File to parse."`
}
)
func main() {
ctx := kong.Parse(&cli)
r, err := os.Open(cli.File)
ctx.FatalIfErrorf(err)
defer r.Close()
program, err := Parse(r)
ctx.FatalIfErrorf(err)
funcs := map[string]Function{
"ADD": func(args ...interface{}) (interface{}, error) {
return args[0].(float64) + args[1].(float64), nil
},
}
err = program.Evaluate(os.Stdin, os.Stdout, funcs)
ctx.FatalIfErrorf(err)
}
==> golang-github-alecthomas-participle-v2-2.1.4/_examples/basic/main_test.go <==
package main
import (
"strings"
"testing"
require "github.com/alecthomas/assert/v2"
)
func TestExe(t *testing.T) {
src := `5 REM inputting the argument
10 PRINT "Factorial of:"
20 INPUT A
30 LET B = 1
35 REM beginning of the loop
40 IF A <= 1 THEN 80
50 LET B = B * A
60 LET A = A - 1
70 GOTO 40
75 REM prints the result
80 PRINT B
`
_, err := Parse(strings.NewReader(src))
require.NoError(t, err)
}
==> golang-github-alecthomas-participle-v2-2.1.4/_examples/ebnf/main.go <==
package main
import (
"bytes"
"encoding/json"
"fmt"
"os"
"strings"
"github.com/alecthomas/kong"
"github.com/alecthomas/participle/v2"
)
var cli struct {
JSON bool `help:"Display AST as JSON."`
}
type Group struct {
Expression *Expression `"(" @@ ")"`
}
func (g *Group) String() string {
return fmt.Sprintf("( %s )", g.Expression)
}
type Option struct {
Expression *Expression `"[" @@ "]"`
}
func (o *Option) String() string {
return fmt.Sprintf("[ %s ]", o.Expression)
}
type Repetition struct {
Expression *Expression `"{" @@ "}"`
}
func (r *Repetition) String() string {
return fmt.Sprintf("{ %s }", r.Expression)
}
type Literal struct {
Start string `@String` // Lexer token "String"
End string `( "…" @String )?`
}
func (l *Literal) String() string {
if l.End != "" {
return fmt.Sprintf("%q … %q", l.Start, l.End)
}
return fmt.Sprintf("%q", l.Start)
}
type Term struct {
Name string `@Ident |`
Literal *Literal `@@ |`
Group *Group `@@ |`
Option *Option `@@ |`
Repetition *Repetition `@@`
}
func (t *Term) String() string {
switch {
case t.Name != "":
return t.Name
case t.Literal != nil:
return t.Literal.String()
case t.Group != nil:
return t.Group.String()
case t.Option != nil:
return t.Option.String()
case t.Repetition != nil:
return t.Repetition.String()
default:
panic("wut")
}
}
type Sequence struct {
Terms []*Term `@@+`
}
func (s *Sequence) String() string {
terms := []string{}
for _, term := range s.Terms {
terms = append(terms, term.String())
}
return strings.Join(terms, " ")
}
type Expression struct {
Alternatives []*Sequence `@@ ( "|" @@ )*`
}
func (e *Expression) String() string {
sequences := []string{}
for _, sequence := range e.Alternatives {
sequences = append(sequences, sequence.String())
}
return strings.Join(sequences, " | ")
}
type Expressions []*Expression
func (e Expressions) String() string {
expressions := []string{}
for _, expression := range e {
expressions = append(expressions, expression.String())
}
return strings.Join(expressions, " ")
}
type Production struct {
Name string `@Ident "="`
Expressions Expressions `@@+ "."`
}
func (p *Production) String() string {
expressions := []string{}
for _, expression := range p.Expressions {
expressions = append(expressions, expression.String())
}
return fmt.Sprintf("%s = %s .", p.Name, strings.Join(expressions, " "))
}
type EBNF struct {
Productions []*Production `@@*`
}
func (e *EBNF) String() string {
w := bytes.NewBuffer(nil)
for _, production := range e.Productions {
fmt.Fprintf(w, "%s\n", production)
}
return w.String()
}
var parser = participle.MustBuild[EBNF]()
func main() {
help := `An EBNF parser compatible with Go's exp/ebnf. The grammar is
in the form:
Production = name "=" [ Expression ] "." .
Expression = Alternative { "|" Alternative } .
Alternative = Term { Term } .
Term = name | token [ "…" token ] | Group | Option | Repetition .
Group = "(" Expression ")" .
Option = "[" Expression "]" .
Repetition = "{" Expression "}" .
`
ctx := kong.Parse(&cli, kong.Description(help))
ebnf, err := parser.Parse("", os.Stdin)
ctx.FatalIfErrorf(err, "")
if cli.JSON {
bytes, _ := json.MarshalIndent(ebnf, "", " ")
fmt.Printf("%s\n", bytes)
} else {
fmt.Print(ebnf)
}
}
==> golang-github-alecthomas-participle-v2-2.1.4/_examples/ebnf/main_test.go <==
package main
import (
"testing"
require "github.com/alecthomas/assert/v2"
)
func TestExe(t *testing.T) {
_, err := parser.ParseString("", `
Production = name "=" [ Expression ] "." .
Expression = Alternative { "|" Alternative } .
Alternative = Term { Term } .
Term = name | token [ "…" token ] | Group | Option | Repetition .
Group = "(" Expression ")" .
Option = "[" Expression "]" .
Repetition = "{" Expression "}" .`)
require.NoError(t, err)
}
==> golang-github-alecthomas-participle-v2-2.1.4/_examples/expr/main.go <==
// nolint: govet
package main
import (
"encoding/json"
"fmt"
"math"
"os"
"strings"
"github.com/alecthomas/kong"
"github.com/alecthomas/participle/v2"
)
var cli struct {
AST bool `help:"Print AST for expression."`
Set map[string]float64 `short:"s" help:"Set variables."`
Expression []string `arg required help:"Expression to evaluate."`
}
type Operator int
const (
OpMul Operator = iota
OpDiv
OpAdd
OpSub
)
var operatorMap = map[string]Operator{"+": OpAdd, "-": OpSub, "*": OpMul, "/": OpDiv}
func (o *Operator) Capture(s []string) error {
*o = operatorMap[s[0]]
return nil
}
// E --> T {( "+" | "-" ) T}
// T --> F {( "*" | "/" ) F}
// F --> P ["^" F]
// P --> v | "(" E ")" | "-" T
type Value struct {
Number *float64 ` @(Float|Int)`
Variable *string `| @Ident`
Subexpression *Expression `| "(" @@ ")"`
}
type Factor struct {
Base *Value `@@`
Exponent *Value `( "^" @@ )?`
}
type OpFactor struct {
Operator Operator `@("*" | "/")`
Factor *Factor `@@`
}
type Term struct {
Left *Factor `@@`
Right []*OpFactor `@@*`
}
type OpTerm struct {
Operator Operator `@("+" | "-")`
Term *Term `@@`
}
type Expression struct {
Left *Term `@@`
Right []*OpTerm `@@*`
}
// Display
func (o Operator) String() string {
switch o {
case OpMul:
return "*"
case OpDiv:
return "/"
case OpSub:
return "-"
case OpAdd:
return "+"
}
panic("unsupported operator")
}
func (v *Value) String() string {
if v.Number != nil {
return fmt.Sprintf("%g", *v.Number)
}
if v.Variable != nil {
return *v.Variable
}
return "(" + v.Subexpression.String() + ")"
}
func (f *Factor) String() string {
out := f.Base.String()
if f.Exponent != nil {
out += " ^ " + f.Exponent.String()
}
return out
}
func (o *OpFactor) String() string {
return fmt.Sprintf("%s %s", o.Operator, o.Factor)
}
func (t *Term) String() string {
out := []string{t.Left.String()}
for _, r := range t.Right {
out = append(out, r.String())
}
return strings.Join(out, " ")
}
func (o *OpTerm) String() string {
return fmt.Sprintf("%s %s", o.Operator, o.Term)
}
func (e *Expression) String() string {
out := []string{e.Left.String()}
for _, r := range e.Right {
out = append(out, r.String())
}
return strings.Join(out, " ")
}
// Evaluation
func (o Operator) Eval(l, r float64) float64 {
switch o {
case OpMul:
return l * r
case OpDiv:
return l / r
case OpAdd:
return l + r
case OpSub:
return l - r
}
panic("unsupported operator")
}
func (v *Value) Eval(ctx Context) float64 {
switch {
case v.Number != nil:
return *v.Number
case v.Variable != nil:
value, ok := ctx[*v.Variable]
if !ok {
panic("no such variable " + *v.Variable)
}
return value
default:
return v.Subexpression.Eval(ctx)
}
}
func (f *Factor) Eval(ctx Context) float64 {
b := f.Base.Eval(ctx)
if f.Exponent != nil {
return math.Pow(b, f.Exponent.Eval(ctx))
}
return b
}
func (t *Term) Eval(ctx Context) float64 {
n := t.Left.Eval(ctx)
for _, r := range t.Right {
n = r.Operator.Eval(n, r.Factor.Eval(ctx))
}
return n
}
func (e *Expression) Eval(ctx Context) float64 {
l := e.Left.Eval(ctx)
for _, r := range e.Right {
l = r.Operator.Eval(l, r.Term.Eval(ctx))
}
return l
}
type Context map[string]float64
var parser = participle.MustBuild[Expression]()
func main() {
ctx := kong.Parse(&cli,
kong.Description("A basic expression parser and evaluator."),
kong.UsageOnError(),
)
expr, err := parser.ParseString("", strings.Join(cli.Expression, " "))
ctx.FatalIfErrorf(err)
if cli.AST {
json.NewEncoder(os.Stdout).Encode(expr)
} else {
fmt.Println(expr, "=", expr.Eval(cli.Set))
}
}
==> golang-github-alecthomas-participle-v2-2.1.4/_examples/expr/main_test.go <==
package main
import (
"testing"
require "github.com/alecthomas/assert/v2"
"github.com/alecthomas/repr"
)
func TestExe(t *testing.T) {
expr, err := parser.ParseString("", `1 + 2 / 3 * (1 + 2)`)
repr.Println(expr)
require.NoError(t, err)
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/expr2/ 0000775 0000000 0000000 00000000000 15053003664 0024061 5 ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/_examples/expr2/main.go 0000664 0000000 0000000 00000004070 15053003664 0025335 0 ustar 00root root 0000000 0000000 package main
import (
"strings"
"github.com/alecthomas/kong"
"github.com/alecthomas/repr"
"github.com/alecthomas/participle/v2"
)
// Based on http://www.craftinginterpreters.com/parsing-expressions.html
// expression → equality ;
// equality → comparison ( ( "!=" | "==" ) comparison )* ;
// comparison → addition ( ( ">" | ">=" | "<" | "<=" ) addition )* ;
// addition → multiplication ( ( "-" | "+" ) multiplication )* ;
// multiplication → unary ( ( "/" | "*" ) unary )* ;
// unary → ( "!" | "-" ) unary
// | primary ;
// primary → NUMBER | STRING | "false" | "true" | "nil"
// | "(" expression ")" ;
type Expression struct {
Equality *Equality `@@`
}
type Equality struct {
Comparison *Comparison `@@`
Op string `( @( "!" "=" | "=" "=" )`
Next *Equality ` @@ )*`
}
type Comparison struct {
Addition *Addition `@@`
Op string `( @( ">" | ">" "=" | "<" | "<" "=" )`
Next *Comparison ` @@ )*`
}
type Addition struct {
Multiplication *Multiplication `@@`
Op string `( @( "-" | "+" )`
Next *Addition ` @@ )*`
}
type Multiplication struct {
Unary *Unary `@@`
Op string `( @( "/" | "*" )`
Next *Multiplication ` @@ )*`
}
type Unary struct {
Op string ` ( @( "!" | "-" )`
Unary *Unary ` @@ )`
Primary *Primary `| @@`
}
type Primary struct {
Number *float64 ` @Float | @Int`
String *string `| @String`
Bool *Boolean `| @( "true" | "false" )`
Nil bool `| @"nil"`
SubExpression *Expression `| "(" @@ ")" `
}
type Boolean bool
func (b *Boolean) Capture(values []string) error {
*b = values[0] == "true"
return nil
}
var parser = participle.MustBuild[Expression](participle.UseLookahead(2))
func main() {
var cli struct {
Expr []string `arg required help:"Expression to parse."`
}
ctx := kong.Parse(&cli)
expr, err := parser.ParseString("", strings.Join(cli.Expr, " "))
ctx.FatalIfErrorf(err)
repr.Println(expr)
}
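// debugParse is an illustrative helper (an addition, not part of the original
// example): it exercises the parser without the kong wiring and dumps the
// right-leaning tree, making the grammar comments above easy to check against
// concrete input.
func debugParse(src string) (*Expression, error) {
expr, err := parser.ParseString("", src)
if err != nil {
return nil, err
}
repr.Println(expr)
return expr, nil
}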
golang-github-alecthomas-participle-v2-2.1.4/_examples/expr2/main_test.go 0000664 0000000 0000000 00000001616 15053003664 0026377 0 ustar 00root root 0000000 0000000 package main
import (
"testing"
require "github.com/alecthomas/assert/v2"
"github.com/alecthomas/repr"
)
func TestExe(t *testing.T) {
expr, err := parser.ParseString("", `1 + 2 / 3 * (1 + 2)`)
repr.Println(expr)
require.NoError(t, err)
}
func toPtr[T any](x T) *T {
return &x
}
func TestExe_BoolFalse(t *testing.T) {
got, err := parser.ParseString("", `1 + false`)
expected := &Expression{
Equality: &Equality{
Comparison: &Comparison{
Addition: &Addition{
Multiplication: &Multiplication{
Unary: &Unary{
Primary: &Primary{
Number: toPtr(float64(1)),
},
},
},
Op: "+",
Next: &Addition{
Multiplication: &Multiplication{
Unary: &Unary{
Primary: &Primary{
Bool: toPtr(Boolean(false)),
},
},
},
},
},
},
},
}
require.NoError(t, err)
require.Equal(t, expected, got)
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/expr3/ 0000775 0000000 0000000 00000000000 15053003664 0024062 5 ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/_examples/expr3/main.go 0000664 0000000 0000000 00000007176 15053003664 0025350 0 ustar 00root root 0000000 0000000 package main
import (
"strings"
"github.com/alecthomas/kong"
"github.com/alecthomas/participle/v2"
"github.com/alecthomas/repr"
)
type (
ExprString struct {
Value string `@String`
}
ExprNumber struct {
Value float64 `@Int | @Float`
}
ExprIdent struct {
Name string `@Ident`
}
ExprParens struct {
Inner ExprPrecAll `"(" @@ ")"`
}
ExprUnary struct {
Op string `@("-" | "!")`
Expr ExprOperand `@@`
}
ExprAddSub struct {
Head ExprPrec2 `@@`
Tail []ExprAddSubExt `@@+`
}
ExprAddSubExt struct {
Op string `@("+" | "-")`
Expr ExprPrec2 `@@`
}
ExprMulDiv struct {
Head ExprPrec3 `@@`
Tail []ExprMulDivExt `@@+`
}
ExprMulDivExt struct {
Op string `@("*" | "/")`
Expr ExprPrec3 `@@`
}
ExprRem struct {
Head ExprOperand `@@`
Tail []ExprRemExt `@@+`
}
ExprRemExt struct {
Op string `@"%"`
Expr ExprOperand `@@`
}
ExprPrecAll interface{ exprPrecAll() }
ExprPrec2 interface{ exprPrec2() }
ExprPrec3 interface{ exprPrec3() }
ExprOperand interface{ exprOperand() }
)
// These expression types can be matched as individual operands
func (ExprIdent) exprOperand() {}
func (ExprNumber) exprOperand() {}
func (ExprString) exprOperand() {}
func (ExprParens) exprOperand() {}
func (ExprUnary) exprOperand() {}
// These expression types can be matched at precedence level 3
func (ExprIdent) exprPrec3() {}
func (ExprNumber) exprPrec3() {}
func (ExprString) exprPrec3() {}
func (ExprParens) exprPrec3() {}
func (ExprUnary) exprPrec3() {}
func (ExprRem) exprPrec3() {}
// These expression types can be matched at precedence level 2
func (ExprIdent) exprPrec2() {}
func (ExprNumber) exprPrec2() {}
func (ExprString) exprPrec2() {}
func (ExprParens) exprPrec2() {}
func (ExprUnary) exprPrec2() {}
func (ExprRem) exprPrec2() {}
func (ExprMulDiv) exprPrec2() {}
// These expression types can be matched at the minimum precedence level
func (ExprIdent) exprPrecAll() {}
func (ExprNumber) exprPrecAll() {}
func (ExprString) exprPrecAll() {}
func (ExprParens) exprPrecAll() {}
func (ExprUnary) exprPrecAll() {}
func (ExprRem) exprPrecAll() {}
func (ExprMulDiv) exprPrecAll() {}
func (ExprAddSub) exprPrecAll() {}
type Expression struct {
X ExprPrecAll `@@`
}
var parser = participle.MustBuild[Expression](
// This grammar requires enough lookahead to see the entire expression before
// it can select the proper binary expression type - in other words, we only
// know that `1 * 2 * 3 * 4` isn't the left-hand side of an addition or subtraction
// expression until we know for sure that no `+` or `-` operator follows it
participle.UseLookahead(99999),
// Register the ExprOperand union so we can parse individual operands
participle.Union[ExprOperand](ExprUnary{}, ExprIdent{}, ExprNumber{}, ExprString{}, ExprParens{}),
// Register the ExprPrec3 union so we can parse expressions at precedence level 3
participle.Union[ExprPrec3](ExprRem{}, ExprUnary{}, ExprIdent{}, ExprNumber{}, ExprString{}, ExprParens{}),
// Register the ExprPrec2 union so we can parse expressions at precedence level 2
participle.Union[ExprPrec2](ExprMulDiv{}, ExprRem{}, ExprUnary{}, ExprIdent{}, ExprNumber{}, ExprString{}, ExprParens{}),
// Register the ExprPrecAll union so we can parse expressions at the minimum precedence level
participle.Union[ExprPrecAll](ExprAddSub{}, ExprMulDiv{}, ExprRem{}, ExprUnary{}, ExprIdent{}, ExprNumber{}, ExprString{}, ExprParens{}),
)
func main() {
var cli struct {
Expr []string `arg required help:"Expression to parse."`
}
ctx := kong.Parse(&cli)
expr, err := parser.ParseString("", strings.Join(cli.Expr, " "))
ctx.FatalIfErrorf(err)
repr.Println(expr)
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/expr3/main_test.go 0000664 0000000 0000000 00000002304 15053003664 0026373 0 ustar 00root root 0000000 0000000 package main
import (
"testing"
require "github.com/alecthomas/assert/v2"
)
func TestExpressionParser(t *testing.T) {
type testCase struct {
src string
expected ExprPrecAll
}
for _, c := range []testCase{
{`1`, ExprNumber{1}},
{`1.5`, ExprNumber{1.5}},
{`"a"`, ExprString{`"a"`}},
{`(1)`, ExprParens{ExprNumber{1}}},
{`1 + 1`, ExprAddSub{ExprNumber{1}, []ExprAddSubExt{{"+", ExprNumber{1}}}}},
{`1 - 1`, ExprAddSub{ExprNumber{1}, []ExprAddSubExt{{"-", ExprNumber{1}}}}},
{`1 * 1`, ExprMulDiv{ExprNumber{1}, []ExprMulDivExt{{"*", ExprNumber{1}}}}},
{`1 / 1`, ExprMulDiv{ExprNumber{1}, []ExprMulDivExt{{"/", ExprNumber{1}}}}},
{`1 % 1`, ExprRem{ExprNumber{1}, []ExprRemExt{{"%", ExprNumber{1}}}}},
{
`a + b - c * d / e % f`,
ExprAddSub{
ExprIdent{"a"},
[]ExprAddSubExt{
{"+", ExprIdent{"b"}},
{"-", ExprMulDiv{
ExprIdent{"c"},
[]ExprMulDivExt{
{"*", ExprIdent{Name: "d"}},
{"/", ExprRem{
ExprIdent{"e"},
[]ExprRemExt{{"%", ExprIdent{"f"}}},
}},
},
}},
},
},
},
} {
actual, err := parser.ParseString("", c.src)
require.NoError(t, err)
require.Equal(t, c.expected, actual.X)
}
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/expr4/ 0000775 0000000 0000000 00000000000 15053003664 0024063 5 ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/_examples/expr4/main.go 0000664 0000000 0000000 00000005315 15053003664 0025342 0 ustar 00root root 0000000 0000000 package main
import (
"fmt"
"strconv"
"strings"
"text/scanner"
"github.com/alecthomas/kong"
"github.com/alecthomas/participle/v2"
"github.com/alecthomas/participle/v2/lexer"
"github.com/alecthomas/repr"
)
type operatorPrec struct{ Left, Right int }
var operatorPrecs = map[string]operatorPrec{
"+": {1, 1},
"-": {1, 1},
"*": {3, 2},
"/": {5, 4},
"%": {7, 6},
}
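// Added commentary: Left is the binding power an operator must have to be
// consumed at the current minimum precedence, and Right is the minimum
// precedence passed when parsing its right-hand operand. Because Right <= Left
// here, operators of equal precedence nest to the right (see the expected
// trees in main_test.go), and the staggered pairs make "%" bind tighter than
// "/", which in turn binds tighter than "*".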
type (
Expr interface{ expr() }
ExprIdent struct{ Name string }
ExprString struct{ Value string }
ExprNumber struct{ Value float64 }
ExprParens struct{ Sub Expr }
ExprUnary struct {
Op string
Sub Expr
}
ExprBinary struct {
Lhs Expr
Op string
Rhs Expr
}
)
func (ExprIdent) expr() {}
func (ExprString) expr() {}
func (ExprNumber) expr() {}
func (ExprParens) expr() {}
func (ExprUnary) expr() {}
func (ExprBinary) expr() {}
func parseExprAny(lex *lexer.PeekingLexer) (Expr, error) { return parseExprPrec(lex, 0) }
func parseExprAtom(lex *lexer.PeekingLexer) (Expr, error) {
switch peek := lex.Peek(); {
case peek.Type == scanner.Ident:
return ExprIdent{lex.Next().Value}, nil
case peek.Type == scanner.String:
val, err := strconv.Unquote(lex.Next().Value)
if err != nil {
return nil, err
}
return ExprString{val}, nil
case peek.Type == scanner.Int || peek.Type == scanner.Float:
val, err := strconv.ParseFloat(lex.Next().Value, 64)
if err != nil {
return nil, err
}
return ExprNumber{val}, nil
case peek.Value == "(":
_ = lex.Next()
inner, err := parseExprAny(lex)
if err != nil {
return nil, err
}
if lex.Peek().Value != ")" {
return nil, fmt.Errorf("expected closing ')'")
}
_ = lex.Next()
return ExprParens{inner}, nil
default:
return nil, participle.NextMatch
}
}
func parseExprPrec(lex *lexer.PeekingLexer, minPrec int) (Expr, error) {
var lhs Expr
if peeked := lex.Peek(); peeked.Value == "-" || peeked.Value == "!" {
op := lex.Next().Value
atom, err := parseExprAtom(lex)
if err != nil {
return nil, err
}
lhs = ExprUnary{op, atom}
} else {
atom, err := parseExprAtom(lex)
if err != nil {
return nil, err
}
lhs = atom
}
for {
peek := lex.Peek()
prec, isOp := operatorPrecs[peek.Value]
if !isOp || prec.Left < minPrec {
break
}
op := lex.Next().Value
rhs, err := parseExprPrec(lex, prec.Right)
if err != nil {
return nil, err
}
lhs = ExprBinary{lhs, op, rhs}
}
return lhs, nil
}
type Expression struct {
X Expr `@@`
}
var parser = participle.MustBuild[Expression](participle.ParseTypeWith(parseExprAny))
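// Added commentary: ParseTypeWith registers parseExprAny as the production for
// the Expr interface, so the @@ capture in Expression hands control to the
// hand-written precedence parser above instead of a grammar generated from
// struct tags.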
func main() {
var cli struct {
Expr []string `arg required help:"Expression to parse."`
}
ctx := kong.Parse(&cli)
expr, err := parser.ParseString("", strings.Join(cli.Expr, " "))
ctx.FatalIfErrorf(err)
repr.Println(expr)
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/expr4/main_test.go 0000664 0000000 0000000 00000002704 15053003664 0026400 0 ustar 00root root 0000000 0000000 package main
import (
"testing"
require "github.com/alecthomas/assert/v2"
)
func TestCustomExprParser(t *testing.T) {
type testCase struct {
src string
expected Expr
}
for _, c := range []testCase{
{`1`, ExprNumber{1}},
{`1.5`, ExprNumber{1.5}},
{`"a"`, ExprString{"a"}},
{`(1)`, ExprParens{ExprNumber{1}}},
{`1+1`, ExprBinary{ExprNumber{1}, "+", ExprNumber{1}}},
{`1-1`, ExprBinary{ExprNumber{1}, "-", ExprNumber{1}}},
{`1*1`, ExprBinary{ExprNumber{1}, "*", ExprNumber{1}}},
{`1/1`, ExprBinary{ExprNumber{1}, "/", ExprNumber{1}}},
{`1%1`, ExprBinary{ExprNumber{1}, "%", ExprNumber{1}}},
{`a - -b`, ExprBinary{ExprIdent{"a"}, "-", ExprUnary{"-", ExprIdent{"b"}}}},
{
`a + b - c * d / e % f`,
ExprBinary{
ExprIdent{"a"}, "+", ExprBinary{
ExprIdent{"b"}, "-", ExprBinary{
ExprIdent{"c"}, "*", ExprBinary{
ExprIdent{"d"}, "/", ExprBinary{
ExprIdent{"e"}, "%", ExprIdent{"f"},
},
},
},
},
},
},
{
`a * b + c * d`,
ExprBinary{
ExprBinary{ExprIdent{"a"}, "*", ExprIdent{"b"}},
"+",
ExprBinary{ExprIdent{"c"}, "*", ExprIdent{"d"}},
},
},
{
`(a + b) * (c + d)`,
ExprBinary{
ExprParens{ExprBinary{ExprIdent{"a"}, "+", ExprIdent{"b"}}},
"*",
ExprParens{ExprBinary{ExprIdent{"c"}, "+", ExprIdent{"d"}}},
},
},
} {
actual, err := parser.ParseString("", c.src)
require.NoError(t, err)
require.Equal(t, c.expected, actual.X)
}
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/generics/ 0000775 0000000 0000000 00000000000 15053003664 0024620 5 ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/_examples/generics/main.go 0000664 0000000 0000000 00000002011 15053003664 0026065 0 ustar 00root root 0000000 0000000 package main
import (
"github.com/alecthomas/repr"
"github.com/alecthomas/participle/v2"
)
type Generic struct {
Params []string `"<" (@Ident ","?)+ ">" (?= ("(" | ")" | "]" | ":" | ";" | "," | "." | "?" | "=" "=" | "!" "="))`
}
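// Added commentary (not in the original source): the (?= ...) group above is a
// positive lookahead: it requires one of the listed tokens to follow the
// closing ">" without consuming anything, which is how the parser tells a
// generic parameter list apart from a "<" comparison such as "hello < world".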
type Call struct {
Params []*Expr `( @@ ","?)*`
}
type Terminal struct {
Ident string ` @Ident`
Number int `| @Int`
Sub *Expr `| "(" @@ ")"`
}
type Expr struct {
Terminal *Terminal `@@`
Generic *Generic `( @@`
RHS *RHS ` | @@ )?`
Call *Call `( "(" @@ ")"`
Reference *Expr ` | "." @@ )?`
}
type RHS struct {
Oper string `@("<" | ">" | "=" "=" | "!" "=" | "+" | "-" | "*" | "/" | "&" "&")`
RHS *Expr `@@`
}
var parser = participle.MustBuild[Expr](participle.UseLookahead(1024))
func main() {
expr, err := parser.ParseString("", "hello < world * (1 + 3) && (world > 10)")
if err != nil {
panic(err)
}
repr.Println(expr)
expr, err = parser.ParseString("", "type.method(1, 2, 3)")
if err != nil {
panic(err)
}
repr.Println(expr)
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/go.mod 0000664 0000000 0000000 00000000573 15053003664 0024134 0 ustar 00root root 0000000 0000000 module github.com/alecthomas/participle/v2/_examples
go 1.18
require (
github.com/alecthomas/assert/v2 v2.11.0
github.com/alecthomas/go-thrift v0.0.3
github.com/alecthomas/kong v1.6.1
github.com/alecthomas/participle/v2 v2.1.1
github.com/alecthomas/repr v0.4.0
)
require github.com/hexops/gotextdiff v1.0.3 // indirect
replace github.com/alecthomas/participle/v2 => ../
golang-github-alecthomas-participle-v2-2.1.4/_examples/go.sum 0000664 0000000 0000000 00000003440 15053003664 0024155 0 ustar 00root root 0000000 0000000 github.com/alecthomas/assert/v2 v2.6.0 h1:o3WJwILtexrEUk3cUVal3oiQY2tfgr/FHWiz/v2n4FU=
github.com/alecthomas/assert/v2 v2.6.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
github.com/alecthomas/assert/v2 v2.10.0 h1:jjRCHsj6hBJhkmhznrCzoNpbA3zqy0fYiUcYZP/GkPY=
github.com/alecthomas/assert/v2 v2.10.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0=
github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
github.com/alecthomas/go-thrift v0.0.0-20220915213326-b383ff0e9ca1 h1:1dmVFISCxlfv+qSa2ak7TkebZ8w4kTRCqb4Uoj9MG5U=
github.com/alecthomas/go-thrift v0.0.0-20220915213326-b383ff0e9ca1/go.mod h1:8dI6rFLWpVn5UKQjYBQMzTAszkI5SDMGOy7iHYbR0sw=
github.com/alecthomas/go-thrift v0.0.3 h1:wKTw+PCQQqOCt+6MCLxl+lFk1/aJ4AJVd4Iek3fibk8=
github.com/alecthomas/go-thrift v0.0.3/go.mod h1:8dI6rFLWpVn5UKQjYBQMzTAszkI5SDMGOy7iHYbR0sw=
github.com/alecthomas/kong v0.8.1 h1:acZdn3m4lLRobeh3Zi2S2EpnXTd1mOL6U7xVml+vfkY=
github.com/alecthomas/kong v0.8.1/go.mod h1:n1iCIO2xS46oE8ZfYCNDqdR0b0wZNrXAIAqro/2132U=
github.com/alecthomas/kong v1.2.1 h1:E8jH4Tsgv6wCRX2nGrdPyHDUCSG83WH2qE4XLACD33Q=
github.com/alecthomas/kong v1.2.1/go.mod h1:rKTSFhbdp3Ryefn8x5MOEprnRFQ7nlmMC01GKhehhBM=
github.com/alecthomas/kong v1.6.1 h1:/7bVimARU3uxPD0hbryPE8qWrS3Oz3kPQoxA/H2NKG8=
github.com/alecthomas/kong v1.6.1/go.mod h1:p2vqieVMeTAnaC83txKtXe8FLke2X07aruPWXyMPQrU=
github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc=
github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
golang-github-alecthomas-participle-v2-2.1.4/_examples/graphql/ 0000775 0000000 0000000 00000000000 15053003664 0024457 5 ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/_examples/graphql/example.graphql 0000664 0000000 0000000 00000001754 15053003664 0027501 0 ustar 00root root 0000000 0000000 # A comment.
type Tweet {
id: ID!
# The tweet text. No more than 140 characters!
body: String
# When the tweet was published
date: Date
# Who published the tweet
Author: User
# Views, retweets, likes, etc
Stats: Stat
}
type User {
id: ID!
username: String
first_name: String
last_name: String
full_name: String
name: String @deprecated
avatar_url: Url
}
type Stat {
views: Int
likes: Int
retweets: Int
responses: Int
}
type Notification {
id: ID
date: Date
type: String
}
type Meta {
count: Int
}
scalar Url
scalar Date
type Query {
Tweet(id: ID!): Tweet
Tweets(limit: Int, skip: Int, sort_field: String, sort_order: String): [Tweet]
TweetsMeta: Meta
User(id: ID!): User
Notifications(limit: Int): [Notification]
NotificationsMeta: Meta
}
type Mutation {
createTweet (
body: String
): Tweet
deleteTweet(id: ID!): Tweet
markTweetRead(id: ID!): Boolean
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/graphql/main.go 0000664 0000000 0000000 00000003647 15053003664 0025744 0 ustar 00root root 0000000 0000000 package main
import (
"fmt"
"os"
"github.com/alecthomas/kong"
"github.com/alecthomas/repr"
"github.com/alecthomas/participle/v2"
"github.com/alecthomas/participle/v2/lexer"
)
type File struct {
Entries []*Entry `@@*`
}
type Entry struct {
Type *Type ` @@`
Schema *Schema `| @@`
Enum *Enum `| @@`
Scalar string `| "scalar" @Ident`
}
type Enum struct {
Name string `"enum" @Ident`
Cases []string `"{" @Ident* "}"`
}
type Schema struct {
Fields []*Field `"schema" "{" @@* "}"`
}
type Type struct {
Name string `"type" @Ident`
Implements string `( "implements" @Ident )?`
Fields []*Field `"{" @@* "}"`
}
type Field struct {
Name string `@Ident`
Arguments []*Argument `( "(" ( @@ ( "," @@ )* )? ")" )?`
Type *TypeRef `":" @@`
Annotation string `( "@" @Ident )?`
}
type Argument struct {
Name string `@Ident`
Type *TypeRef `":" @@`
Default *Value `( "=" @@ )?`
}
type TypeRef struct {
Array *TypeRef `( "[" @@ "]"`
Type string ` | @Ident )`
NonNullable bool `@"!"?`
}
type Value struct {
Symbol string `@Ident`
}
var (
graphQLLexer = lexer.MustSimple([]lexer.SimpleRule{
{"Comment", `(?:#|//)[^\n]*\n?`},
{"Ident", `[a-zA-Z]\w*`},
{"Number", `(?:\d*\.)?\d+`},
{"Punct", `[-[!@#$%^&*()+_={}\|:;"'<,>.?/]|]`},
{"Whitespace", `[ \t\n\r]+`},
})
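// Note (added commentary): Comment and Whitespace tokens are produced by the
// lexer but elided via participle.Elide below, so the grammar's struct tags
// never need to mention them.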
parser = participle.MustBuild[File](
participle.Lexer(graphQLLexer),
participle.Elide("Comment", "Whitespace"),
participle.UseLookahead(2),
)
)
var cli struct {
EBNF bool `help:"Dump EBNF."`
Files []string `arg:"" optional:"" type:"existingfile" help:"GraphQL schema files to parse."`
}
func main() {
ctx := kong.Parse(&cli)
if cli.EBNF {
fmt.Println(parser.String())
ctx.Exit(0)
}
for _, file := range cli.Files {
r, err := os.Open(file)
ctx.FatalIfErrorf(err)
ast, err := parser.Parse("", r)
r.Close()
repr.Println(ast)
ctx.FatalIfErrorf(err)
}
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/graphql/main_test.go 0000664 0000000 0000000 00000000532 15053003664 0026771 0 ustar 00root root 0000000 0000000 package main
import (
"io/ioutil"
"testing"
require "github.com/alecthomas/assert/v2"
)
func BenchmarkParser(b *testing.B) {
source, err := ioutil.ReadFile("example.graphql")
require.NoError(b, err)
b.ReportAllocs()
b.ReportMetric(float64(len(source)*b.N), "B/s")
for i := 0; i < b.N; i++ {
_, _ = parser.ParseBytes("", source)
}
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/hcl/ 0000775 0000000 0000000 00000000000 15053003664 0023567 5 ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/_examples/hcl/example.hcl 0000664 0000000 0000000 00000001172 15053003664 0025713 0 ustar 00root root 0000000 0000000 region = "us-west-2"
access_key = "something"
secret_key = "something_else"
bucket = "backups"
directory config {
source_dir = "/etc/eventstore"
dest_prefix = "escluster/config"
exclude = ["*.hcl"]
pre_backup_script = "before_backup.sh"
post_backup_script = "after_backup.sh"
pre_restore_script = "before_restore.sh"
post_restore_script = "after_restore.sh"
chmod = 0755
}
directory data {
source_dir = "/var/lib/eventstore"
dest_prefix = "escluster/a/data"
exclude = [
"*.merging"
]
pre_restore_script = "before_restore.sh"
post_restore_script = "after_restore.sh"
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/hcl/main.go 0000664 0000000 0000000 00000002642 15053003664 0025046 0 ustar 00root root 0000000 0000000 // Package main implements a parser for HashiCorp's HCL configuration syntax.
package main
import (
"fmt"
"os"
"strings"
"github.com/alecthomas/repr"
"github.com/alecthomas/participle/v2"
)
type Bool bool
func (b *Bool) Capture(v []string) error { *b = v[0] == "true"; return nil }
type Value struct {
Boolean *Bool ` @("true"|"false")`
Identifier *string `| @Ident ( @"." @Ident )*`
String *string `| @(String|Char|RawString)`
Number *float64 `| @(Float|Int)`
Array []*Value `| "[" ( @@ ","? )* "]"`
}
func (l *Value) GoString() string {
switch {
case l.Boolean != nil:
return fmt.Sprintf("%v", *l.Boolean)
case l.Identifier != nil:
return fmt.Sprintf("`%s`", *l.Identifier)
case l.String != nil:
return fmt.Sprintf("%q", *l.String)
case l.Number != nil:
return fmt.Sprintf("%v", *l.Number)
case l.Array != nil:
out := []string{}
for _, v := range l.Array {
out = append(out, v.GoString())
}
return fmt.Sprintf("[]*Value{ %s }", strings.Join(out, ", "))
}
panic("??")
}
type Entry struct {
Key string `@Ident`
Value *Value `( "=" @@`
Block *Block ` | @@ )`
}
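// Added commentary: Entry's alternation means everything after the key is
// either "=" followed by a Value or a nested Block, so the same Entry type
// covers both `region = "us-west-2"` and `directory config { ... }`.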
type Block struct {
Parameters []*Value `@@*`
Entries []*Entry `"{" @@* "}"`
}
type Config struct {
Entries []*Entry `@@*`
}
var parser = participle.MustBuild[Config](participle.Unquote())
func main() {
expr, err := parser.Parse("", os.Stdin)
if err != nil {
panic(err)
}
repr.Println(expr)
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/hcl/main_test.go 0000664 0000000 0000000 00000001535 15053003664 0026105 0 ustar 00root root 0000000 0000000 package main
import (
"testing"
require "github.com/alecthomas/assert/v2"
"github.com/alecthomas/repr"
)
func TestExe(t *testing.T) {
ast, err := parser.ParseString("", `
region = "us-west-2"
access_key = "something"
secret_key = "something_else"
bucket = "backups"
directory config {
source_dir = "/etc/eventstore"
dest_prefix = "escluster/config"
exclude = ["*.hcl"]
pre_backup_script = "before_backup.sh"
post_backup_script = "after_backup.sh"
pre_restore_script = "before_restore.sh"
post_restore_script = "after_restore.sh"
chmod = 0755
}
directory data {
source_dir = "/var/lib/eventstore"
dest_prefix = "escluster/a/data"
exclude = [
"*.merging"
]
pre_restore_script = "before_restore.sh"
post_restore_script = "after_restore.sh"
}
`)
repr.Println(ast)
require.NoError(t, err)
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/ini/ 0000775 0000000 0000000 00000000000 15053003664 0023600 5 ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/_examples/ini/example.ini 0000664 0000000 0000000 00000000155 15053003664 0025735 0 ustar 00root root 0000000 0000000 a = "a"
b = 123
# A comment
[numbers]
a = 10.3
b = 20
; Another comment
[strings]
a = "\"quoted\""
b = "b"
golang-github-alecthomas-participle-v2-2.1.4/_examples/ini/main.go 0000664 0000000 0000000 00000002370 15053003664 0025055 0 ustar 00root root 0000000 0000000 package main
import (
"os"
"github.com/alecthomas/repr"
"github.com/alecthomas/participle/v2"
"github.com/alecthomas/participle/v2/lexer"
)
// A custom lexer for INI files. This illustrates a relatively complex Regexp lexer, as well
// as use of the Unquote filter, which unquotes string tokens.
var (
iniLexer = lexer.MustSimple([]lexer.SimpleRule{
{`Ident`, `[a-zA-Z][a-zA-Z_\d]*`},
{`String`, `"(?:\\.|[^"])*"`},
{`Float`, `\d+(?:\.\d+)?`},
{`Punct`, `[][=]`},
{"comment", `[#;][^\n]*`},
{"whitespace", `\s+`},
})
parser = participle.MustBuild[INI](
participle.Lexer(iniLexer),
participle.Unquote("String"),
participle.Union[Value](String{}, Number{}),
)
)
type INI struct {
Properties []*Property `@@*`
Sections []*Section `@@*`
}
type Section struct {
Identifier string `"[" @Ident "]"`
Properties []*Property `@@*`
}
type Property struct {
Key string `@Ident "="`
Value Value `@@`
}
type Value interface{ value() }
type String struct {
String string `@String`
}
func (String) value() {}
type Number struct {
Number float64 `@Float`
}
func (Number) value() {}
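// Added commentary: Value is a closed union. participle.Union[Value] in the
// parser options registers String and Number as its members, so Property's @@
// capture fills the interface field with whichever member parses first.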
func main() {
ini, err := parser.Parse("", os.Stdin)
repr.Println(ini, repr.Indent(" "), repr.OmitEmpty(true))
if err != nil {
panic(err)
}
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/ini/main_test.go 0000664 0000000 0000000 00000000407 15053003664 0026113 0 ustar 00root root 0000000 0000000 package main
import (
"testing"
require "github.com/alecthomas/assert/v2"
"github.com/alecthomas/repr"
)
func TestExe(t *testing.T) {
ini, err := parser.ParseString("", `
global = 1
[section]
value = "str"
`)
require.NoError(t, err)
repr.Println(ini)
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/json/ 0000775 0000000 0000000 00000000000 15053003664 0023772 5 ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/_examples/json/main.go 0000664 0000000 0000000 00000003431 15053003664 0025246 0 ustar 00root root 0000000 0000000 // nolint: golint, dupl
package main
import (
"os"
"github.com/alecthomas/kong"
"github.com/alecthomas/participle/v2"
"github.com/alecthomas/participle/v2/lexer"
)
var (
jsonLexer = lexer.MustSimple([]lexer.SimpleRule{
{Name: "Comment", Pattern: `\/\/[^\n]*`},
{Name: "String", Pattern: `"(\\"|[^"])*"`},
{Name: "Number", Pattern: `[-+]?(\d*\.)?\d+`},
{Name: "Punct", Pattern: `[-[!@#$%^&*()+_={}\|:;"'<,>.?/]|]`},
{Name: "Null", Pattern: "null"},
{Name: "True", Pattern: "true"},
{Name: "False", Pattern: "false"},
{Name: "EOL", Pattern: `[\n\r]+`},
{Name: "Whitespace", Pattern: `[ \t]+`},
})
jsonParser = participle.MustBuild[Json](
participle.Lexer(jsonLexer),
participle.Unquote("String"),
participle.Elide("Whitespace", "EOL"),
participle.UseLookahead(2),
)
cli struct {
File string `arg:"" type:"existingfile" help:"File to parse."`
}
)
// Parse parses JSON data into a Json value.
func Parse(data []byte) (*Json, error) {
json, err := jsonParser.ParseBytes("", data)
if err != nil {
return nil, err
}
return json, nil
}
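// Illustrative usage (an addition, not part of the original example):
//
//	res, err := Parse([]byte(`{"list": [1, "two", true]}`))
//	// With Unquote("String"), res.Object.Pairs[0].Key == "list".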
type Json struct {
Pos lexer.Position
Object *Object `parser:"@@ |"`
Array *Array `parser:"@@ |"`
Number *string `parser:"@Number |"`
String *string `parser:"@String |"`
False *string `parser:"@False |"`
True *string `parser:"@True |"`
Null *string `parser:"@Null"`
}
type Object struct {
Pos lexer.Position
Pairs []*Pair `parser:"'{' @@ (',' @@)* '}'"`
}
type Pair struct {
Pos lexer.Position
Key string `parser:"@String ':'"`
Value *Json `parser:"@@"`
}
type Array struct {
Pos lexer.Position
Items []*Json `parser:"'[' @@ (',' @@)* ']'"`
}
func main() {
ctx := kong.Parse(&cli)
data, err := os.ReadFile(cli.File)
ctx.FatalIfErrorf(err)
res, err := Parse(data)
ctx.FatalIfErrorf(err)
ctx.Printf("res is: %v", res)
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/json/main_test.go 0000664 0000000 0000000 00000000366 15053003664 0026311 0 ustar 00root root 0000000 0000000 package main
import (
"os"
"testing"
require "github.com/alecthomas/assert/v2"
)
func TestParse(t *testing.T) {
src, err := os.ReadFile("./test.json")
require.NoError(t, err)
_, err = Parse(src)
require.NoError(t, err)
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/json/test.json 0000664 0000000 0000000 00000000346 15053003664 0025647 0 ustar 00root root 0000000 0000000 {
"list": [1, 1.2, 1, -1, {"foo": "bar"}, true, false, null],
"object": {
"foo1": "bar2",
"foo2": true,
"foo3": false,
"foo4": null,
"foo5": 1,
"foo6": "ss"
}
} golang-github-alecthomas-participle-v2-2.1.4/_examples/jsonpath/ 0000775 0000000 0000000 00000000000 15053003664 0024647 5 ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/_examples/jsonpath/github-webhook.json 0000664 0000000 0000000 00000033234 15053003664 0030465 0 ustar 00root root 0000000 0000000 {
"action": "created",
"check_run": {
"id": 128620228,
"node_id": "MDg6Q2hlY2tSdW4xMjg2MjAyMjg=",
"head_sha": "ec26c3e57ca3a959ca5aad62de7213c562f8c821",
"external_id": "",
"url": "https://api.github.com/repos/Codertocat/Hello-World/check-runs/128620228",
"html_url": "https://github.com/Codertocat/Hello-World/runs/128620228",
"details_url": "https://octocoders.io",
"status": "queued",
"conclusion": null,
"started_at": "2019-05-15T15:21:12Z",
"completed_at": null,
"output": {
"title": null,
"summary": null,
"text": null,
"annotations_count": 0,
"annotations_url": "https://api.github.com/repos/Codertocat/Hello-World/check-runs/128620228/annotations"
},
"name": "Octocoders-linter",
"check_suite": {
"id": 118578147,
"node_id": "MDEwOkNoZWNrU3VpdGUxMTg1NzgxNDc=",
"head_branch": "changes",
"head_sha": "ec26c3e57ca3a959ca5aad62de7213c562f8c821",
"status": "queued",
"conclusion": null,
"url": "https://api.github.com/repos/Codertocat/Hello-World/check-suites/118578147",
"before": "6113728f27ae82c7b1a177c8d03f9e96e0adf246",
"after": "ec26c3e57ca3a959ca5aad62de7213c562f8c821",
"pull_requests": [
{
"url": "https://api.github.com/repos/Codertocat/Hello-World/pulls/2",
"id": 279147437,
"number": 2,
"head": {
"ref": "changes",
"sha": "ec26c3e57ca3a959ca5aad62de7213c562f8c821",
"repo": {
"id": 186853002,
"url": "https://api.github.com/repos/Codertocat/Hello-World",
"name": "Hello-World"
}
},
"base": {
"ref": "master",
"sha": "f95f852bd8fca8fcc58a9a2d6c842781e32a215e",
"repo": {
"id": 186853002,
"url": "https://api.github.com/repos/Codertocat/Hello-World",
"name": "Hello-World"
}
}
}
],
"app": {
"id": 29310,
"node_id": "MDM6QXBwMjkzMTA=",
"owner": {
"login": "Octocoders",
"id": 38302899,
"node_id": "MDEyOk9yZ2FuaXphdGlvbjM4MzAyODk5",
"avatar_url": "https://avatars1.githubusercontent.com/u/38302899?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/Octocoders",
"html_url": "https://github.com/Octocoders",
"followers_url": "https://api.github.com/users/Octocoders/followers",
"following_url": "https://api.github.com/users/Octocoders/following{/other_user}",
"gists_url": "https://api.github.com/users/Octocoders/gists{/gist_id}",
"starred_url": "https://api.github.com/users/Octocoders/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Octocoders/subscriptions",
"organizations_url": "https://api.github.com/users/Octocoders/orgs",
"repos_url": "https://api.github.com/users/Octocoders/repos",
"events_url": "https://api.github.com/users/Octocoders/events{/privacy}",
"received_events_url": "https://api.github.com/users/Octocoders/received_events",
"type": "Organization",
"site_admin": false
},
"name": "octocoders-linter",
"description": "",
"external_url": "https://octocoders.io",
"html_url": "https://github.com/apps/octocoders-linter",
"created_at": "2019-04-19T19:36:24Z",
"updated_at": "2019-04-19T19:36:56Z",
"permissions": {
"administration": "write",
"checks": "write",
"contents": "write",
"deployments": "write",
"issues": "write",
"members": "write",
"metadata": "read",
"organization_administration": "write",
"organization_hooks": "write",
"organization_plan": "read",
"organization_projects": "write",
"organization_user_blocking": "write",
"pages": "write",
"pull_requests": "write",
"repository_hooks": "write",
"repository_projects": "write",
"statuses": "write",
"team_discussions": "write",
"vulnerability_alerts": "read"
},
"events": []
},
"created_at": "2019-05-15T15:20:31Z",
"updated_at": "2019-05-15T15:20:31Z"
},
"app": {
"id": 29310,
"node_id": "MDM6QXBwMjkzMTA=",
"owner": {
"login": "Octocoders",
"id": 38302899,
"node_id": "MDEyOk9yZ2FuaXphdGlvbjM4MzAyODk5",
"avatar_url": "https://avatars1.githubusercontent.com/u/38302899?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/Octocoders",
"html_url": "https://github.com/Octocoders",
"followers_url": "https://api.github.com/users/Octocoders/followers",
"following_url": "https://api.github.com/users/Octocoders/following{/other_user}",
"gists_url": "https://api.github.com/users/Octocoders/gists{/gist_id}",
"starred_url": "https://api.github.com/users/Octocoders/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Octocoders/subscriptions",
"organizations_url": "https://api.github.com/users/Octocoders/orgs",
"repos_url": "https://api.github.com/users/Octocoders/repos",
"events_url": "https://api.github.com/users/Octocoders/events{/privacy}",
"received_events_url": "https://api.github.com/users/Octocoders/received_events",
"type": "Organization",
"site_admin": false
},
"name": "octocoders-linter",
"description": "",
"external_url": "https://octocoders.io",
"html_url": "https://github.com/apps/octocoders-linter",
"created_at": "2019-04-19T19:36:24Z",
"updated_at": "2019-04-19T19:36:56Z",
"permissions": {
"administration": "write",
"checks": "write",
"contents": "write",
"deployments": "write",
"issues": "write",
"members": "write",
"metadata": "read",
"organization_administration": "write",
"organization_hooks": "write",
"organization_plan": "read",
"organization_projects": "write",
"organization_user_blocking": "write",
"pages": "write",
"pull_requests": "write",
"repository_hooks": "write",
"repository_projects": "write",
"statuses": "write",
"team_discussions": "write",
"vulnerability_alerts": "read"
},
"events": []
},
"pull_requests": [
{
"url": "https://api.github.com/repos/Codertocat/Hello-World/pulls/2",
"id": 279147437,
"number": 2,
"head": {
"ref": "changes",
"sha": "ec26c3e57ca3a959ca5aad62de7213c562f8c821",
"repo": {
"id": 186853002,
"url": "https://api.github.com/repos/Codertocat/Hello-World",
"name": "Hello-World"
}
},
"base": {
"ref": "master",
"sha": "f95f852bd8fca8fcc58a9a2d6c842781e32a215e",
"repo": {
"id": 186853002,
"url": "https://api.github.com/repos/Codertocat/Hello-World",
"name": "Hello-World"
}
}
}
]
},
"repository": {
"id": 186853002,
"node_id": "MDEwOlJlcG9zaXRvcnkxODY4NTMwMDI=",
"name": "Hello-World",
"full_name": "Codertocat/Hello-World",
"private": false,
"owner": {
"login": "Codertocat",
"id": 21031067,
"node_id": "MDQ6VXNlcjIxMDMxMDY3",
"avatar_url": "https://avatars1.githubusercontent.com/u/21031067?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/Codertocat",
"html_url": "https://github.com/Codertocat",
"followers_url": "https://api.github.com/users/Codertocat/followers",
"following_url": "https://api.github.com/users/Codertocat/following{/other_user}",
"gists_url": "https://api.github.com/users/Codertocat/gists{/gist_id}",
"starred_url": "https://api.github.com/users/Codertocat/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Codertocat/subscriptions",
"organizations_url": "https://api.github.com/users/Codertocat/orgs",
"repos_url": "https://api.github.com/users/Codertocat/repos",
"events_url": "https://api.github.com/users/Codertocat/events{/privacy}",
"received_events_url": "https://api.github.com/users/Codertocat/received_events",
"type": "User",
"site_admin": false
},
"html_url": "https://github.com/Codertocat/Hello-World",
"description": null,
"fork": false,
"url": "https://api.github.com/repos/Codertocat/Hello-World",
"forks_url": "https://api.github.com/repos/Codertocat/Hello-World/forks",
"keys_url": "https://api.github.com/repos/Codertocat/Hello-World/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/Codertocat/Hello-World/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/Codertocat/Hello-World/teams",
"hooks_url": "https://api.github.com/repos/Codertocat/Hello-World/hooks",
"issue_events_url": "https://api.github.com/repos/Codertocat/Hello-World/issues/events{/number}",
"events_url": "https://api.github.com/repos/Codertocat/Hello-World/events",
"assignees_url": "https://api.github.com/repos/Codertocat/Hello-World/assignees{/user}",
"branches_url": "https://api.github.com/repos/Codertocat/Hello-World/branches{/branch}",
"tags_url": "https://api.github.com/repos/Codertocat/Hello-World/tags",
"blobs_url": "https://api.github.com/repos/Codertocat/Hello-World/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/Codertocat/Hello-World/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/Codertocat/Hello-World/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/Codertocat/Hello-World/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/Codertocat/Hello-World/statuses/{sha}",
"languages_url": "https://api.github.com/repos/Codertocat/Hello-World/languages",
"stargazers_url": "https://api.github.com/repos/Codertocat/Hello-World/stargazers",
"contributors_url": "https://api.github.com/repos/Codertocat/Hello-World/contributors",
"subscribers_url": "https://api.github.com/repos/Codertocat/Hello-World/subscribers",
"subscription_url": "https://api.github.com/repos/Codertocat/Hello-World/subscription",
"commits_url": "https://api.github.com/repos/Codertocat/Hello-World/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/Codertocat/Hello-World/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/Codertocat/Hello-World/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/Codertocat/Hello-World/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/Codertocat/Hello-World/contents/{+path}",
"compare_url": "https://api.github.com/repos/Codertocat/Hello-World/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/Codertocat/Hello-World/merges",
"archive_url": "https://api.github.com/repos/Codertocat/Hello-World/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/Codertocat/Hello-World/downloads",
"issues_url": "https://api.github.com/repos/Codertocat/Hello-World/issues{/number}",
"pulls_url": "https://api.github.com/repos/Codertocat/Hello-World/pulls{/number}",
"milestones_url": "https://api.github.com/repos/Codertocat/Hello-World/milestones{/number}",
"notifications_url": "https://api.github.com/repos/Codertocat/Hello-World/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/Codertocat/Hello-World/labels{/name}",
"releases_url": "https://api.github.com/repos/Codertocat/Hello-World/releases{/id}",
"deployments_url": "https://api.github.com/repos/Codertocat/Hello-World/deployments",
"created_at": "2019-05-15T15:19:25Z",
"updated_at": "2019-05-15T15:21:03Z",
"pushed_at": "2019-05-15T15:20:57Z",
"git_url": "git://github.com/Codertocat/Hello-World.git",
"ssh_url": "git@github.com:Codertocat/Hello-World.git",
"clone_url": "https://github.com/Codertocat/Hello-World.git",
"svn_url": "https://github.com/Codertocat/Hello-World",
"homepage": null,
"size": 0,
"stargazers_count": 0,
"watchers_count": 0,
"language": "Ruby",
"has_issues": true,
"has_projects": true,
"has_downloads": true,
"has_wiki": true,
"has_pages": true,
"forks_count": 1,
"mirror_url": null,
"archived": false,
"disabled": false,
"open_issues_count": 2,
"license": null,
"forks": 1,
"open_issues": 2,
"watchers": 0,
"default_branch": "master"
},
"sender": {
"login": "Codertocat",
"id": 21031067,
"node_id": "MDQ6VXNlcjIxMDMxMDY3",
"avatar_url": "https://avatars1.githubusercontent.com/u/21031067?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/Codertocat",
"html_url": "https://github.com/Codertocat",
"followers_url": "https://api.github.com/users/Codertocat/followers",
"following_url": "https://api.github.com/users/Codertocat/following{/other_user}",
"gists_url": "https://api.github.com/users/Codertocat/gists{/gist_id}",
"starred_url": "https://api.github.com/users/Codertocat/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Codertocat/subscriptions",
"organizations_url": "https://api.github.com/users/Codertocat/orgs",
"repos_url": "https://api.github.com/users/Codertocat/repos",
"events_url": "https://api.github.com/users/Codertocat/events{/privacy}",
"received_events_url": "https://api.github.com/users/Codertocat/received_events",
"type": "User",
"site_admin": false
}
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/jsonpath/main.go 0000664 0000000 0000000 00000004430 15053003664 0026123 0 ustar 00root root 0000000 0000000 package main
import (
"encoding/json"
"fmt"
"os"
"github.com/alecthomas/participle/v2"
)
type pathExpr struct {
Parts []part `@@ ( "." @@ )*`
}
type part struct {
Obj string `@Ident`
Acc []acc `("[" @@ "]")*`
}
type acc struct {
Name *string `@(String|Char|RawString)`
Index *int `| @Int`
}
var parser = participle.MustBuild[pathExpr]()
func main() {
if len(os.Args) < 3 {
fmt.Fprintf(os.Stderr, "Usage: %s <query> <files...>\n", os.Args[0])
os.Exit(2)
}
q := os.Args[1]
files := os.Args[2:]
expr, err := parser.ParseString("", q)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
for _, file := range files {
f, err := os.Open(file)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
var input map[string]interface{}
if err := json.NewDecoder(f).Decode(&input); err != nil {
f.Close()
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
f.Close()
result, err := match(input, expr)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
switch r := result.(type) {
case map[string]interface{}:
enc := json.NewEncoder(os.Stdout)
enc.SetIndent("", " ")
_ = enc.Encode(r)
default:
fmt.Printf("%v\n", r)
}
}
}
func match(input map[string]interface{}, expr *pathExpr) (interface{}, error) {
var v interface{} = input
for _, e := range expr.Parts {
switch m := v.(type) {
case map[string]interface{}:
val, ok := m[e.Obj]
if !ok {
return nil, fmt.Errorf("not found: %q", e.Obj)
}
v = val
for _, a := range e.Acc {
if a.Name != nil {
switch m := v.(type) {
case map[string]interface{}:
val, ok = m[*a.Name].(map[string]interface{})
if !ok {
return nil, fmt.Errorf("not found: %q does not contain %q", e.Obj, *a.Name)
}
v = val
default:
return nil, fmt.Errorf("cannot access named index in %T", v)
}
}
if a.Index != nil {
switch s := v.(type) {
case []interface{}:
if len(s) <= *a.Index {
return nil, fmt.Errorf("not found: %q only contains %d items", e.Obj, len(s))
}
v = s[*a.Index]
default:
return nil, fmt.Errorf("cannot access numeric index in %T", v)
}
}
}
default:
return nil, fmt.Errorf("cannot read %q, parent is not a map", e.Obj)
}
}
return v, nil
}
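// Illustrative usage (an addition, not part of the original example), with
// input decoded from the bundled github-webhook.json payload:
//
//	expr, _ := parser.ParseString("", `check_run.check_suite.pull_requests[0].url`)
//	url, _ := match(input, expr)
//	// url == "https://api.github.com/repos/Codertocat/Hello-World/pulls/2"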
golang-github-alecthomas-participle-v2-2.1.4/_examples/jsonpath/main_test.go 0000664 0000000 0000000 00000001060 15053003664 0027156 0 ustar 00root root 0000000 0000000 package main
import (
"encoding/json"
"os"
"testing"
require "github.com/alecthomas/assert/v2"
)
func TestExe(t *testing.T) {
r, err := os.Open("github-webhook.json")
require.NoError(t, err)
input := map[string]interface{}{}
err = json.NewDecoder(r).Decode(&input)
require.NoError(t, err)
ast, err := parser.ParseString(``, `check_run.check_suite.pull_requests[0].url`)
require.NoError(t, err)
result, err := match(input, ast)
require.NoError(t, err)
require.Equal(t, "https://api.github.com/repos/Codertocat/Hello-World/pulls/2", result)
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/microc/ 0000775 0000000 0000000 00000000000 15053003664 0024275 5 ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/_examples/microc/main.go 0000664 0000000 0000000 00000013156 15053003664 0025556 0 ustar 00root root 0000000 0000000 package main
import (
"github.com/alecthomas/repr"
"github.com/alecthomas/participle/v2"
"github.com/alecthomas/participle/v2/lexer"
)
// https://www.it.uu.se/katalog/aleji304/CompilersProject/uc.html
//
// program ::= topdec_list
// topdec_list ::= /empty/ | topdec topdec_list
// topdec ::= vardec ";"
// | funtype ident "(" formals ")" funbody
// vardec ::= scalardec | arraydec
// scalardec ::= typename ident
// arraydec ::= typename ident "[" intconst "]"
// typename ::= "int" | "char"
// funtype ::= typename | "void"
// funbody ::= "{" locals stmts "}" | ";"
// formals ::= "void" | formal_list
// formal_list ::= formaldec | formaldec "," formal_list
// formaldec ::= scalardec | typename ident "[" "]"
// locals ::= /empty/ | vardec ";" locals
// stmts ::= /empty/ | stmt stmts
// stmt ::= expr ";"
// | "return" expr ";" | "return" ";"
// | "while" condition stmt
// | "if" condition stmt else_part
// | "{" stmts "}"
// | ";"
// else_part ::= /empty/ | "else" stmt
// condition ::= "(" expr ")"
// expr ::= intconst
// | ident | ident "[" expr "]"
// | unop expr
// | expr binop expr
// | ident "(" actuals ")"
// | "(" expr ")"
// unop ::= "-" | "!"
// binop ::= "+" | "-" | "*" | "/"
// | "<" | ">" | "<=" | ">=" | "!=" | "=="
// | "&&"
// | "="
// actuals ::= /empty/ | expr_list
// expr_list ::= expr | expr "," expr_list
type Program struct {
Pos lexer.Position
TopDec []*TopDec `@@*`
}
type TopDec struct {
Pos lexer.Position
FunDec *FunDec ` @@`
VarDec *VarDec `| @@ ";"`
}
type VarDec struct {
Pos lexer.Position
ArrayDec *ArrayDec ` @@`
ScalarDec *ScalarDec `| @@`
}
type ScalarDec struct {
Pos lexer.Position
Type string `@Type`
Name string `@Ident`
}
type ArrayDec struct {
Pos lexer.Position
Type string `@Type`
Name string `@Ident`
Size int `"[" @Int "]"`
}
type ReturnStmt struct {
Pos lexer.Position
Result *Expr `"return" @@?`
}
type WhileStmt struct {
Pos lexer.Position
Condition *Expr `"while" "(" @@ ")"`
Body *Stmt `@@`
}
type IfStmt struct {
Pos lexer.Position
Condition *Expr `"if" "(" @@ ")"`
Body *Stmt `@@`
Else *Stmt `("else" @@)?`
}
type Stmts struct {
Pos lexer.Position
Stmts []*Stmt `@@*`
}
type Stmt struct {
Pos lexer.Position
IfStmt *IfStmt ` @@`
ReturnStmt *ReturnStmt `| @@`
WhileStmt *WhileStmt `| @@`
Block *Stmts `| "{" @@ "}"`
Expr *Expr `| @@`
Empty bool `| @";"`
}
type FunBody struct {
Pos lexer.Position
Locals []*VarDec `(@@ ";")*`
Stmts *Stmts `@@`
}
type FunDec struct {
Pos lexer.Position
ReturnType string `@(Type | "void")`
Name string `@Ident`
Parameters []*Parameter `"(" ((@@ ("," @@)*) | "void") ")"`
FunBody *FunBody `(";" | "{" @@ "}")`
}
type Parameter struct {
Pos lexer.Position
Array *ArrayParameter ` @@`
Scalar *ScalarDec `| @@`
}
type ArrayParameter struct {
Pos lexer.Position
Type string `@Type`
Ident string `@Ident "[" "]"`
}
type Expr struct {
Pos lexer.Position
Assignment *Assignment `@@`
}
type Assignment struct {
Pos lexer.Position
Equality *Equality `@@`
Op string `( @"="`
Next *Equality ` @@ )?`
}
type Equality struct {
Pos lexer.Position
Comparison *Comparison `@@`
Op string `[ @( "!" "=" | "=" "=" )`
Next *Equality ` @@ ]`
}
type Comparison struct {
Pos lexer.Position
Addition *Addition `@@`
Op string `[ @( ">" "=" | ">" | "<" "=" | "<" )`
Next *Comparison ` @@ ]`
}
type Addition struct {
Pos lexer.Position
Multiplication *Multiplication `@@`
Op string `[ @( "-" | "+" )`
Next *Addition ` @@ ]`
}
type Multiplication struct {
Pos lexer.Position
Unary *Unary `@@`
Op string `[ @( "/" | "*" )`
Next *Multiplication ` @@ ]`
}
type Unary struct {
Pos lexer.Position
Op string ` ( @( "!" | "-" )`
Unary *Unary ` @@ )`
Primary *Primary `| @@`
}
type Primary struct {
Pos lexer.Position
Number *int ` @Int`
ArrayIndex *ArrayIndex `| @@`
CallFunc *CallFunc `| @@`
Ident string `| @Ident`
SubExpression *Expr `| "(" @@ ")" `
}
type ArrayIndex struct {
Pos lexer.Position
Ident string `@Ident`
Index []*Expr `("[" @@ "]")+`
}
type CallFunc struct {
Pos lexer.Position
Ident string `@Ident`
Index []*Expr `"(" (@@ ("," @@)*)? ")"`
}
var (
lex = lexer.MustSimple([]lexer.SimpleRule{
{"comment", `//.*|/\*.*?\*/`},
{"whitespace", `\s+`},
{"Type", `\b(int|char)\b`},
{"Ident", `\b([a-zA-Z_][a-zA-Z0-9_]*)\b`},
{"Punct", `[-,()*/+%{};&!=:<>]|\[|\]`},
{"Int", `\d+`},
})
parser = participle.MustBuild[Program](
participle.Lexer(lex),
participle.UseLookahead(2))
)
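// Added commentary (not in the original source): each binary precedence level
// above is a struct with an optional Op/Next tail rather than a left-recursive
// rule, which recursive-descent parsers such as participle cannot handle
// directly. Binding tightens down the chain Assignment -> Equality ->
// Comparison -> Addition -> Multiplication -> Unary, and UseLookahead(2) lets
// the parser distinguish alternatives that share a leading token.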
const sample = `
/* This is an example uC program. */
void putint(int i);
int fac(int n)
{
if (n < 2)
return n;
return n * fac(n - 1);
}
int sum(int n, int a[])
{
int i;
int s;
i = 0;
s = 0;
while (i <= n) {
s = s + a[i];
i = i + 1;
}
return s;
}
int main(void)
{
int a[2];
a[0] = fac(5);
a[1] = 27;
putint(sum(2, a)); // prints 147
return 0;
}
`
func main() {
ast, err := parser.ParseString("", sample)
repr.Println(ast)
if err != nil {
panic(err)
}
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/microc/main_test.go 0000664 0000000 0000000 00000000710 15053003664 0026605 0 ustar 00root root 0000000 0000000 package main
import (
"strings"
"testing"
require "github.com/alecthomas/assert/v2"
"github.com/alecthomas/repr"
)
func TestExe(t *testing.T) {
program, err := parser.ParseString("", sample)
require.NoError(t, err)
repr.Println(program)
}
func BenchmarkParser(b *testing.B) {
src := strings.Repeat(sample, 10)
b.ReportAllocs()
b.ReportMetric(float64(len(src)*b.N), "B/s")
for i := 0; i < b.N; i++ {
_, _ = parser.ParseString("", src)
}
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/precedenceclimbing/ 0000775 0000000 0000000 00000000000 15053003664 0026623 5 ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/_examples/precedenceclimbing/main.go 0000664 0000000 0000000 00000005131 15053003664 0030076 0 ustar 00root root 0000000 0000000 // Package main shows an example of how to add precedence climbing to a Participle parser.
//
// Precedence climbing is an approach to parsing expressions that efficiently
// produces compact parse trees.
//
// In contrast, naive recursive descent expression parsers produce parse trees proportional in
// complexity to the number of operators supported. This impacts both readability and
// performance.
//
// It is based on https://eli.thegreenplace.net/2012/08/02/parsing-expressions-by-precedence-climbing
package main
import (
"fmt"
"os"
"strconv"
"strings"
"github.com/alecthomas/repr"
"github.com/alecthomas/participle/v2"
"github.com/alecthomas/participle/v2/lexer"
)
type opInfo struct {
RightAssociative bool
Priority int
}
var info = map[string]opInfo{
"+": {Priority: 1},
"-": {Priority: 1},
"*": {Priority: 2},
"/": {Priority: 2},
"^": {RightAssociative: true, Priority: 3},
}
type Expr struct {
Terminal *int
Left *Expr
Op string
Right *Expr
}
func (e *Expr) String() string {
if e.Left != nil {
return fmt.Sprintf("(%s %s %s)", e.Left, e.Op, e.Right)
}
return fmt.Sprintf("%d", *e.Terminal)
}
func (e *Expr) Parse(lex *lexer.PeekingLexer) error {
*e = *parseExpr(lex, 0)
return nil
}
// (1 + 2) * 3
func parseExpr(lex *lexer.PeekingLexer, minPrec int) *Expr {
lhs := parseAtom(lex)
for {
tok := peek(lex)
if tok.EOF() || !isOp(rune(tok.Type)) || info[tok.Value].Priority < minPrec {
break
}
op := tok.Value
nextMinPrec := info[op].Priority
if !info[op].RightAssociative {
nextMinPrec++
}
lex.Next()
rhs := parseExpr(lex, nextMinPrec)
lhs = parseOp(op, lhs, rhs)
}
return lhs
}
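// Worked trace (added commentary) for "1 + 2 * 3": parseExpr(lex, 0) reads 1,
// sees "+" (priority 1 >= 0) and, since "+" is left-associative, recurses with
// minPrec 2 for the right-hand side. That recursive call reads 2, then sees
// "*" (priority 2 >= 2) and consumes it, returning (2 * 3), so the final tree
// is (1 + (2 * 3)).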
func parseAtom(lex *lexer.PeekingLexer) *Expr {
tok := peek(lex)
if tok.Type == '(' {
lex.Next()
val := parseExpr(lex, 1)
if peek(lex).Value != ")" {
panic("unmatched (")
}
lex.Next()
return val
} else if tok.EOF() {
panic("unexpected EOF")
} else if isOp(rune(tok.Type)) {
panic("expected a terminal not " + tok.String())
} else {
lex.Next()
n, err := strconv.ParseInt(tok.Value, 10, 64)
if err != nil {
panic("invalid number " + tok.Value)
}
in := int(n)
return &Expr{Terminal: &in}
}
}
func isOp(rn rune) bool {
return strings.ContainsRune("+-*/^", rn)
}
func peek(lex *lexer.PeekingLexer) *lexer.Token {
return lex.Peek()
}
func parseOp(op string, lhs *Expr, rhs *Expr) *Expr {
return &Expr{
Op: op,
Left: lhs,
Right: rhs,
}
}
var parser = participle.MustBuild[Expr]()
func main() {
e, err := parser.ParseString("", strings.Join(os.Args[1:], " "))
fmt.Println(e)
repr.Println(e)
if err != nil {
panic(err)
}
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/precedenceclimbing/main_test.go 0000664 0000000 0000000 00000001003 15053003664 0031127 0 ustar 00root root 0000000 0000000 package main
import (
"testing"
require "github.com/alecthomas/assert/v2"
)
func TestExe(t *testing.T) {
actual, err := parser.ParseString("", `1 + 2 - 3 * (4 + 2)`)
require.NoError(t, err)
expected := expr(
expr(intp(1), "+", intp(2)),
"-",
expr(intp(3),
"*",
expr(intp(4), "+", intp(2))))
require.Equal(t, expected, actual)
}
func expr(l *Expr, op string, r *Expr) *Expr { return &Expr{Left: l, Op: op, Right: r} }
func intp(n int) *Expr { return &Expr{Terminal: &n} }
golang-github-alecthomas-participle-v2-2.1.4/_examples/protobuf/ 0000775 0000000 0000000 00000000000 15053003664 0024661 5 ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/_examples/protobuf/example.proto 0000664 0000000 0000000 00000000612 15053003664 0027400 0 ustar 00root root 0000000 0000000 syntax = "proto3";
package test.test;
message SearchRequest {
string query = 1;
int32 page_number = 2;
int32 result_per_page = 3;
map<string, int32> scores = 4;
message Foo {}
enum Bar {
FOO = 0;
}
}
message SearchResponse {
string results = 1;
}
enum Type {
INT = 0;
DOUBLE = 1;
}
service SearchService {
rpc Search(SearchRequest) returns (SearchResponse);
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/protobuf/main.go 0000664 0000000 0000000 00000012623 15053003664 0026140 0 ustar 00root root 0000000 0000000 // nolint: govet, golint
package main
import (
"fmt"
"os"
"github.com/alecthomas/kong"
"github.com/alecthomas/repr"
"github.com/alecthomas/participle/v2"
"github.com/alecthomas/participle/v2/lexer"
)
type Proto struct {
Pos lexer.Position
Entries []*Entry `( @@ ";"* )*`
}
type Entry struct {
Pos lexer.Position
Syntax string ` "syntax" "=" @String`
Package string `| "package" @(Ident ( "." Ident )*)`
Import string `| "import" @String`
Message *Message `| @@`
Service *Service `| @@`
Enum *Enum `| @@`
Option *Option `| "option" @@`
Extend *Extend `| @@`
}
type Option struct {
Pos lexer.Position
Name string `( "(" @Ident @( "." Ident )* ")" | @Ident @( "." @Ident )* )`
Attr *string `( "." @Ident ( "." @Ident )* )?`
Value *Value `"=" @@`
}
type Value struct {
Pos lexer.Position
String *string ` @String`
Number *float64 `| @Float`
Int *int64 `| @Int`
Bool *bool `| (@"true" | "false")`
Reference *string `| @Ident @( "." Ident )*`
Map *Map `| @@`
Array *Array `| @@`
}
type Array struct {
Pos lexer.Position
Elements []*Value `"[" ( @@ ( ","? @@ )* )? "]"`
}
type Map struct {
Pos lexer.Position
Entries []*MapEntry `"{" ( @@ ( ( "," )? @@ )* )? "}"`
}
type MapEntry struct {
Pos lexer.Position
Key *Value `@@`
Value *Value `":"? @@`
}
type Extensions struct {
Pos lexer.Position
Extensions []Range `"extensions" @@ ( "," @@ )*`
}
type Reserved struct {
Pos lexer.Position
Reserved []Range `"reserved" @@ ( "," @@ )*`
}
type Range struct {
Ident string ` @String`
Start int `| ( @Int`
End *int ` ( "to" ( @Int`
Max bool ` | @"max" ) )? )`
}
type Extend struct {
Pos lexer.Position
Reference string `"extend" @Ident ( "." @Ident )*`
Fields []*Field `"{" ( @@ ";"? )* "}"`
}
type Service struct {
Pos lexer.Position
Name string `"service" @Ident`
Entry []*ServiceEntry `"{" ( @@ ";"? )* "}"`
}
type ServiceEntry struct {
Pos lexer.Position
Option *Option ` "option" @@`
Method *Method `| @@`
}
type Method struct {
Pos lexer.Position
Name string `"rpc" @Ident`
StreamingRequest bool `"(" @"stream"?`
Request *Type ` @@ ")"`
StreamingResponse bool `"returns" "(" @"stream"?`
Response *Type ` @@ ")"`
Options []*Option `( "{" ( "option" @@ ";" )* "}" )?`
}
type Enum struct {
Pos lexer.Position
Name string `"enum" @Ident`
Values []*EnumEntry `"{" ( @@ ( ";" )* )* "}"`
}
type EnumEntry struct {
Pos lexer.Position
Value *EnumValue ` @@`
Option *Option `| "option" @@`
}
type EnumValue struct {
Pos lexer.Position
Key string `@Ident`
Value int `"=" @( [ "-" ] Int )`
Options []*Option `( "[" @@ ( "," @@ )* "]" )?`
}
type Message struct {
Pos lexer.Position
Name string `"message" @Ident`
Entries []*MessageEntry `"{" @@* "}"`
}
type MessageEntry struct {
Pos lexer.Position
Enum *Enum `( @@`
Option *Option ` | "option" @@`
Message *Message ` | @@`
Oneof *Oneof ` | @@`
Extend *Extend ` | @@`
Reserved *Reserved ` | @@`
Extensions *Extensions ` | @@`
Field *Field ` | @@ ) ";"*`
}
type Oneof struct {
Pos lexer.Position
Name string `"oneof" @Ident`
Entries []*OneofEntry `"{" ( @@ ";"* )* "}"`
}
type OneofEntry struct {
Pos lexer.Position
Field *Field ` @@`
Option *Option `| "option" @@`
}
type Field struct {
Pos lexer.Position
Optional bool `( @"optional"`
Required bool ` | @"required"`
Repeated bool ` | @"repeated" )?`
Type *Type `@@`
Name string `@Ident`
Tag int `"=" @Int`
Options []*Option `( "[" @@ ( "," @@ )* "]" )?`
}
type Scalar int
const (
None Scalar = iota
Double
Float
Int32
Int64
Uint32
Uint64
Sint32
Sint64
Fixed32
Fixed64
SFixed32
SFixed64
Bool
String
Bytes
)
var scalarToString = map[Scalar]string{
None: "None", Double: "Double", Float: "Float", Int32: "Int32", Int64: "Int64", Uint32: "Uint32",
Uint64: "Uint64", Sint32: "Sint32", Sint64: "Sint64", Fixed32: "Fixed32", Fixed64: "Fixed64",
SFixed32: "SFixed32", SFixed64: "SFixed64", Bool: "Bool", String: "String", Bytes: "Bytes",
}
func (s Scalar) GoString() string { return scalarToString[s] }
var stringToScalar = map[string]Scalar{
"double": Double, "float": Float, "int32": Int32, "int64": Int64, "uint32": Uint32, "uint64": Uint64,
"sint32": Sint32, "sint64": Sint64, "fixed32": Fixed32, "fixed64": Fixed64, "sfixed32": SFixed32,
"sfixed64": SFixed64, "bool": Bool, "string": String, "bytes": Bytes,
}
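// Parse implements participle.Parseable. It consumes the next token only when
// it names a protobuf scalar type, and returns participle.NextMatch otherwise
// so the parser can try the remaining Type alternatives.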
func (s *Scalar) Parse(lex *lexer.PeekingLexer) error {
token := lex.Peek()
v, ok := stringToScalar[token.Value]
if !ok {
return participle.NextMatch
}
lex.Next()
*s = v
return nil
}
type Type struct {
Pos lexer.Position
Scalar Scalar ` @@`
Map *MapType `| @@`
Reference string `| @(Ident ( "." Ident )*)`
}
type MapType struct {
Pos lexer.Position
Key *Type `"map" "<" @@`
Value *Type `"," @@ ">"`
}
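// UseLookahead(2) lets the parser look two tokens ahead when choosing between
// otherwise ambiguous alternatives in this grammar.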
var (
parser = participle.MustBuild[Proto](participle.UseLookahead(2))
cli struct {
Files []string `required existingfile arg help:"Protobuf files."`
}
)
func main() {
ctx := kong.Parse(&cli)
for _, file := range cli.Files {
fmt.Println(file)
r, err := os.Open(file)
ctx.FatalIfErrorf(err, "")
proto, err := parser.Parse("", r)
ctx.FatalIfErrorf(err, "")
repr.Println(proto, repr.Hide[lexer.Position]())
}
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/protobuf/main_test.go 0000664 0000000 0000000 00000001072 15053003664 0027173 0 ustar 00root root 0000000 0000000 package main
import (
"testing"
require "github.com/alecthomas/assert/v2"
)
func TestExe(t *testing.T) {
_, err := parser.ParseString("", `
syntax = "proto3";
package test.test;
message SearchRequest {
string query = 1;
int32 page_number = 2;
int32 result_per_page = 3;
map<string, int32> scores = 4;
message Foo {}
enum Bar {
FOO = 0;
}
}
message SearchResponse {
string results = 1;
}
enum Type {
INT = 0;
DOUBLE = 1;
}
service SearchService {
rpc Search(SearchRequest) returns (SearchResponse);
}
`)
require.NoError(t, err)
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/simpleexpr/ 0000775 0000000 0000000 00000000000 15053003664 0025211 5 ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/_examples/simpleexpr/main.go 0000664 0000000 0000000 00000002231 15053003664 0026462 0 ustar 00root root 0000000 0000000 package main
import (
"github.com/alecthomas/kong"
"github.com/alecthomas/repr"
"github.com/alecthomas/participle/v2"
)
type Expr struct {
Lhs *Value `@@`
Tail []*Oper `@@*`
}
type Oper struct {
Op string `@( "|" "|" | "&" "&" | "!" "=" | ("!"|"="|"<"|">") "="? | "+" | "-" | "/" | "*" )`
Rhs *Value `@@`
}
type Value struct {
Number *float64 ` @Float | @Int`
String *string `| @String`
Bool *string `| @( "true" | "false" )`
Nil bool `| @"nil"`
SubExpression *Expr `| "(" @@ ")" `
}
var (
cli struct {
Expr string `arg:"" help:"Expression."`
}
parser = participle.MustBuild[Expr]()
)
func main() {
kctx := kong.Parse(&cli, kong.Description(`
A simple expression parser that does not capture precedence at all. Precedence
must be applied at the evaluation phase.
The advantage of this approach over expr1, which does encode precedence in
the parser, is that it is significantly less complex and less nested. The
advantage of this over the "precedenceclimbing" example is that no custom
parsing is required.
`))
expr, err := parser.ParseString("", cli.Expr)
kctx.FatalIfErrorf(err)
repr.Println(expr)
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/simpleexpr/main_test.go 0000664 0000000 0000000 00000000367 15053003664 0027531 0 ustar 00root root 0000000 0000000 package main
import (
"testing"
require "github.com/alecthomas/assert/v2"
"github.com/alecthomas/repr"
)
func TestExe(t *testing.T) {
expr, err := parser.ParseString("", `1 + 2 / 3 * (1 + 2)`)
repr.Println(expr)
require.NoError(t, err)
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/sql/ 0000775 0000000 0000000 00000000000 15053003664 0023620 5 ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/_examples/sql/main.go 0000664 0000000 0000000 00000010536 15053003664 0025100 0 ustar 00root root 0000000 0000000 // nolint: govet
package main
import (
	"strings"
"github.com/alecthomas/kong"
"github.com/alecthomas/repr"
"github.com/alecthomas/participle/v2"
"github.com/alecthomas/participle/v2/lexer"
)
type Boolean bool
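// Capture implements participle.Capture. It folds case because keywords are
// matched case-insensitively, but the raw token value keeps whatever case the
// input used.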
func (b *Boolean) Capture(values []string) error {
*b = strings.EqualFold(values[0], "TRUE")
return nil
}
// Select based on http://www.h2database.com/html/grammar.html
type Select struct {
Top *Term `"SELECT" ( "TOP" @@ )?`
Distinct bool `( @"DISTINCT"`
All bool ` | @"ALL" )?`
Expression *SelectExpression `@@`
From *From `"FROM" @@`
Limit *Expression `( "LIMIT" @@ )?`
Offset *Expression `( "OFFSET" @@ )?`
GroupBy *Expression `( "GROUP" "BY" @@ )?`
}
type From struct {
TableExpressions []*TableExpression `@@ ( "," @@ )*`
Where *Expression `( "WHERE" @@ )?`
}
type TableExpression struct {
Table string `( @Ident ( "." @Ident )*`
Select *Select ` | "(" @@ ")"`
Values []*Expression ` | "VALUES" "(" @@ ( "," @@ )* ")")`
As string `( "AS" @Ident )?`
}
type SelectExpression struct {
All bool ` @"*"`
Expressions []*AliasedExpression `| @@ ( "," @@ )*`
}
type AliasedExpression struct {
Expression *Expression `@@`
As string `( "AS" @Ident )?`
}
type Expression struct {
Or []*OrCondition `@@ ( "OR" @@ )*`
}
type OrCondition struct {
And []*Condition `@@ ( "AND" @@ )*`
}
type Condition struct {
Operand *ConditionOperand ` @@`
Not *Condition `| "NOT" @@`
Exists *Select `| "EXISTS" "(" @@ ")"`
}
type ConditionOperand struct {
Operand *Operand `@@`
ConditionRHS *ConditionRHS `@@?`
}
type ConditionRHS struct {
Compare *Compare ` @@`
Is *Is `| "IS" @@`
Between *Between `| "BETWEEN" @@`
In *In `| "IN" "(" @@ ")"`
Like *Like `| "LIKE" @@`
}
type Compare struct {
Operator string `@( "<>" | "<=" | ">=" | "=" | "<" | ">" | "!=" )`
Operand *Operand `( @@`
Select *CompareSelect ` | @@ )`
}
type CompareSelect struct {
All bool `( @"ALL"`
Any bool ` | @"ANY"`
Some bool ` | @"SOME" )`
Select *Select `"(" @@ ")"`
}
type Like struct {
Not bool `[ @"NOT" ]`
Operand *Operand `@@`
}
type Is struct {
Not bool `[ @"NOT" ]`
Null bool `( @"NULL"`
DistinctFrom *Operand ` | "DISTINCT" "FROM" @@ )`
}
type Between struct {
Start *Operand `@@`
End *Operand `"AND" @@`
}
type In struct {
Select *Select ` @@`
Expressions []*Expression `| @@ ( "," @@ )*`
}
type Operand struct {
Summand []*Summand `@@ ( "|" "|" @@ )*`
}
type Summand struct {
LHS *Factor `@@`
Op string `[ @("+" | "-")`
RHS *Factor ` @@ ]`
}
type Factor struct {
LHS *Term `@@`
Op string `( @("*" | "/" | "%")`
RHS *Term ` @@ )?`
}
type Term struct {
Select *Select ` @@`
Value *Value `| @@`
SymbolRef *SymbolRef `| @@`
SubExpression *Expression `| "(" @@ ")"`
}
type SymbolRef struct {
Symbol string `@Ident @( "." Ident )*`
Parameters []*Expression `( "(" @@ ( "," @@ )* ")" )?`
}
type Value struct {
Wildcard bool `( @"*"`
Number *float64 ` | @Number`
String *string ` | @String`
Boolean *Boolean ` | @("TRUE" | "FALSE")`
Null bool ` | @"NULL"`
Array *Array ` | @@ )`
}
type Array struct {
Expressions []*Expression `"(" @@ ( "," @@ )* ")"`
}
var (
cli struct {
SQL string `arg:"" required:"" help:"SQL to parse."`
}
sqlLexer = lexer.MustSimple([]lexer.SimpleRule{
{`Keyword`, `(?i)\b(SELECT|FROM|TOP|DISTINCT|ALL|WHERE|GROUP|BY|HAVING|UNION|MINUS|EXCEPT|INTERSECT|ORDER|LIMIT|OFFSET|VALUES|EXISTS|TRUE|FALSE|NULL|IS|NOT|ANY|SOME|BETWEEN|AND|OR|LIKE|AS|IN)\b`},
{`Ident`, `[a-zA-Z_][a-zA-Z0-9_]*`},
{`Number`, `[-+]?\d*\.?\d+([eE][-+]?\d+)?`},
{`String`, `'[^']*'|"[^"]*"`},
{`Operators`, `<>|!=|<=|>=|[-+*/%,.()=<>|]`},
{"whitespace", `\s+`},
})
parser = participle.MustBuild[Select](
participle.Lexer(sqlLexer),
participle.Unquote("String"),
participle.CaseInsensitive("Keyword"),
// participle.Elide("Comment"),
// Need to solve left recursion detection first, if possible.
// participle.UseLookahead(),
)
)
func main() {
ctx := kong.Parse(&cli)
sql, err := parser.ParseString("", cli.SQL)
repr.Println(sql, repr.Indent(" "), repr.OmitEmpty(true))
ctx.FatalIfErrorf(err)
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/sql/main_test.go 0000664 0000000 0000000 00000000405 15053003664 0026131 0 ustar 00root root 0000000 0000000 package main
import (
"testing"
require "github.com/alecthomas/assert/v2"
"github.com/alecthomas/repr"
)
func TestExe(t *testing.T) {
sel, err := parser.ParseString("", `SELECT * FROM table WHERE attr = 10`)
require.NoError(t, err)
repr.Println(sel)
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/stateful/ 0000775 0000000 0000000 00000000000 15053003664 0024650 5 ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/_examples/stateful/main.go 0000664 0000000 0000000 00000002237 15053003664 0026127 0 ustar 00root root 0000000 0000000 package main
import (
"log"
"github.com/alecthomas/repr"
"github.com/alecthomas/participle/v2"
"github.com/alecthomas/participle/v2/lexer"
)
type Terminal struct {
String *String ` @@`
Ident string `| @Ident`
}
type Expr struct {
Left *Terminal `@@`
Op string `( @Oper`
Right *Terminal ` @@)?`
}
type Fragment struct {
Escaped string `( @Escaped`
Expr *Expr ` | "${" @@ "}"`
Text string ` | @Char)`
}
type String struct {
Fragments []*Fragment `"\"" @@* "\""`
}
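// The lexer below is stateful: lexing starts in the Root state, a '"' pushes
// the String state, "${" inside a string pushes the Expr state (which includes
// the Root rules so nested strings work), and "}" pops back to the string.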
var (
def = lexer.MustStateful(lexer.Rules{
"Root": {
{`String`, `"`, lexer.Push("String")},
},
"String": {
{"Escaped", `\\.`, nil},
{"StringEnd", `"`, lexer.Pop()},
{"Expr", `\${`, lexer.Push("Expr")},
{"Char", `\$|[^$"\\]+`, nil},
},
"Expr": {
lexer.Include("Root"),
{`Whitespace`, `\s+`, nil},
{`Oper`, `[-+/*%]`, nil},
{"Ident", `\w+`, nil},
{"ExprEnd", `}`, lexer.Pop()},
},
})
parser = participle.MustBuild[String](participle.Lexer(def),
participle.Elide("Whitespace"))
)
func main() {
actual, err := parser.ParseString("", `"hello $(world) ${first + "${last}"}"`)
repr.Println(actual)
if err != nil {
log.Fatal(err)
}
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/stateful/main_test.go 0000664 0000000 0000000 00000000365 15053003664 0027166 0 ustar 00root root 0000000 0000000 package main
import (
"log"
"testing"
"github.com/alecthomas/repr"
)
func TestExe(t *testing.T) {
actual, err := parser.ParseString("", `"hello $(world) ${first + "${last}"}"`)
if err != nil {
t.Fatal(err)
}
repr.Println(actual)
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/thrift/ 0000775 0000000 0000000 00000000000 15053003664 0024321 5 ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/_examples/thrift/lexer_gen.go 0000664 0000000 0000000 00000015537 15053003664 0026633 0 ustar 00root root 0000000 0000000 // Code generated by Participle. DO NOT EDIT.
package main
import (
"io"
"regexp/syntax"
"strings"
"unicode/utf8"
"github.com/alecthomas/participle/v2"
"github.com/alecthomas/participle/v2/lexer"
)
var _ syntax.Op
var Lexer lexer.Definition = definitionImpl{}
type definitionImpl struct{}
func (definitionImpl) Symbols() map[string]lexer.TokenType {
return map[string]lexer.TokenType{
"Comment": -7,
"EOF": -1,
"Ident": -3,
"Number": -2,
"Punct": -6,
"String": -4,
"Whitespace": -5,
}
}
func (definitionImpl) LexString(filename string, s string) (lexer.Lexer, error) {
return &lexerImpl{
s: s,
pos: lexer.Position{
Filename: filename,
Line: 1,
Column: 1,
},
states: []lexerState{{name: "Root"}},
}, nil
}
func (d definitionImpl) LexBytes(filename string, b []byte) (lexer.Lexer, error) {
return d.LexString(filename, string(b))
}
func (d definitionImpl) Lex(filename string, r io.Reader) (lexer.Lexer, error) {
s := &strings.Builder{}
_, err := io.Copy(s, r)
if err != nil {
return nil, err
}
return d.LexString(filename, s.String())
}
type lexerState struct {
name string
groups []string
}
type lexerImpl struct {
s string
p int
pos lexer.Position
states []lexerState
}
func (l *lexerImpl) Next() (lexer.Token, error) {
if l.p == len(l.s) {
return lexer.EOFToken(l.pos), nil
}
var (
state = l.states[len(l.states)-1]
groups []int
sym lexer.TokenType
)
switch state.name {
case "Root":
if match := matchNumber(l.s, l.p); match[1] != 0 {
sym = -2
groups = match[:]
} else if match := matchIdent(l.s, l.p); match[1] != 0 {
sym = -3
groups = match[:]
} else if match := matchString(l.s, l.p); match[1] != 0 {
sym = -4
groups = match[:]
} else if match := matchWhitespace(l.s, l.p); match[1] != 0 {
sym = -5
groups = match[:]
} else if match := matchPunct(l.s, l.p); match[1] != 0 {
sym = -6
groups = match[:]
} else if match := matchComment(l.s, l.p); match[1] != 0 {
sym = -7
groups = match[:]
}
}
if groups == nil {
sample := []rune(l.s[l.p:])
if len(sample) > 16 {
sample = append(sample[:16], []rune("...")...)
}
return lexer.Token{}, participle.Errorf(l.pos, "invalid input text %q", string(sample))
}
pos := l.pos
span := l.s[groups[0]:groups[1]]
l.p = groups[1]
l.pos.Advance(span)
return lexer.Token{
Type: sym,
Value: span,
Pos: pos,
}, nil
}
func (l *lexerImpl) sgroups(match []int) []string {
sgroups := make([]string, len(match)/2)
for i := 0; i < len(match)-1; i += 2 {
sgroups[i/2] = l.s[l.p+match[i] : l.p+match[i+1]]
}
return sgroups
}
// [0-9]+
func matchNumber(s string, p int) (groups [2]int) {
// [0-9] (CharClass)
l0 := func(s string, p int) int {
if len(s) <= p {
return -1
}
rn := s[p]
switch {
case rn >= '0' && rn <= '9':
return p + 1
}
return -1
}
// [0-9]+ (Plus)
l1 := func(s string, p int) int {
if p = l0(s, p); p == -1 {
return -1
}
for len(s) > p {
if np := l0(s, p); np == -1 {
return p
} else {
p = np
}
}
return p
}
np := l1(s, p)
if np == -1 {
return
}
groups[0] = p
groups[1] = np
return
}
// [0-9A-Z_a-z]+
func matchIdent(s string, p int) (groups [2]int) {
// [0-9A-Z_a-z] (CharClass)
l0 := func(s string, p int) int {
if len(s) <= p {
return -1
}
rn := s[p]
switch {
case rn >= '0' && rn <= '9':
return p + 1
case rn >= 'A' && rn <= 'Z':
return p + 1
case rn == '_':
return p + 1
case rn >= 'a' && rn <= 'z':
return p + 1
}
return -1
}
// [0-9A-Z_a-z]+ (Plus)
l1 := func(s string, p int) int {
if p = l0(s, p); p == -1 {
return -1
}
for len(s) > p {
if np := l0(s, p); np == -1 {
return p
} else {
p = np
}
}
return p
}
np := l1(s, p)
if np == -1 {
return
}
groups[0] = p
groups[1] = np
return
}
// "[^"]*"
func matchString(s string, p int) (groups [2]int) {
// " (Literal)
l0 := func(s string, p int) int {
if p < len(s) && s[p] == '"' {
return p + 1
}
return -1
}
// [^"] (CharClass)
l1 := func(s string, p int) int {
if len(s) <= p {
return -1
}
var (
rn rune
n int
)
if s[p] < utf8.RuneSelf {
rn, n = rune(s[p]), 1
} else {
rn, n = utf8.DecodeRuneInString(s[p:])
}
switch {
case rn >= '\x00' && rn <= '!':
return p + 1
case rn >= '#' && rn <= '\U0010ffff':
return p + n
}
return -1
}
// [^"]* (Star)
l2 := func(s string, p int) int {
for len(s) > p {
if np := l1(s, p); np == -1 {
return p
} else {
p = np
}
}
return p
}
// "[^"]*" (Concat)
l3 := func(s string, p int) int {
if p = l0(s, p); p == -1 {
return -1
}
if p = l2(s, p); p == -1 {
return -1
}
if p = l0(s, p); p == -1 {
return -1
}
return p
}
np := l3(s, p)
if np == -1 {
return
}
groups[0] = p
groups[1] = np
return
}
// [\t-\n\f-\r ]+
func matchWhitespace(s string, p int) (groups [2]int) {
// [\t-\n\f-\r ] (CharClass)
l0 := func(s string, p int) int {
if len(s) <= p {
return -1
}
rn := s[p]
switch {
case rn >= '\t' && rn <= '\n':
return p + 1
case rn >= '\f' && rn <= '\r':
return p + 1
case rn == ' ':
return p + 1
}
return -1
}
// [\t-\n\f-\r ]+ (Plus)
l1 := func(s string, p int) int {
if p = l0(s, p); p == -1 {
return -1
}
for len(s) > p {
if np := l0(s, p); np == -1 {
return p
} else {
p = np
}
}
return p
}
np := l1(s, p)
if np == -1 {
return
}
groups[0] = p
groups[1] = np
return
}
// [\(-\),\.:<->\{\}]
func matchPunct(s string, p int) (groups [2]int) {
// [\(-\),\.:<->\{\}] (CharClass)
l0 := func(s string, p int) int {
if len(s) <= p {
return -1
}
rn := s[p]
switch {
case rn >= '(' && rn <= ')':
return p + 1
case rn == ',':
return p + 1
case rn == '.':
return p + 1
case rn == ':':
return p + 1
case rn >= '<' && rn <= '>':
return p + 1
case rn == '{':
return p + 1
case rn == '}':
return p + 1
}
return -1
}
np := l0(s, p)
if np == -1 {
return
}
groups[0] = p
groups[1] = np
return
}
// //(?-s:.)*
func matchComment(s string, p int) (groups [2]int) {
// // (Literal)
l0 := func(s string, p int) int {
if p+2 <= len(s) && s[p:p+2] == "//" {
return p + 2
}
return -1
}
// (?-s:.) (AnyCharNotNL)
l1 := func(s string, p int) int {
var (
rn rune
n int
)
if s[p] < utf8.RuneSelf {
rn, n = rune(s[p]), 1
} else {
rn, n = utf8.DecodeRuneInString(s[p:])
}
if len(s) <= p+n || rn == '\n' {
return -1
}
return p + n
}
// (?-s:.)* (Star)
l2 := func(s string, p int) int {
for len(s) > p {
if np := l1(s, p); np == -1 {
return p
} else {
p = np
}
}
return p
}
// //(?-s:.)* (Concat)
l3 := func(s string, p int) int {
if p = l0(s, p); p == -1 {
return -1
}
if p = l2(s, p); p == -1 {
return -1
}
return p
}
np := l3(s, p)
if np == -1 {
return
}
groups[0] = p
groups[1] = np
return
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/thrift/main.go 0000664 0000000 0000000 00000012610 15053003664 0025574 0 ustar 00root root 0000000 0000000 // Package main implements a parser for Thrift files (https://thrift.apache.org/)
//
// It parses namespaces, exceptions, services, structs, consts, typedefs and enums, but is easily
// extensible to more.
//
// It also supports annotations and method throws.
package main
import (
"fmt"
"os"
"strings"
"github.com/alecthomas/kong"
"github.com/alecthomas/repr"
"github.com/alecthomas/participle/v2"
"github.com/alecthomas/participle/v2/lexer"
)
type Namespace struct {
Pos lexer.Position
Language string `"namespace" @Ident`
Namespace string `@Ident ( @"." @Ident )*`
}
type Type struct {
Pos lexer.Position
Name string `@Ident ( @"." @Ident )*`
TypeOne *Type `( "<" @@ ( ","`
TypeTwo *Type ` @@ )? ">" )?`
}
type Annotation struct {
Pos lexer.Position
Key string `@Ident ( @"." @Ident )*`
Value *Literal `( "=" @@ )?`
}
type Field struct {
Pos lexer.Position
ID string `@Number ":"`
Requirement string `@( "optional" | "required" )?`
Type *Type `@@`
Name string `@Ident`
Default *Literal `( "=" @@ )?`
Annotations []*Annotation `( "(" @@ ( "," @@ )* ")" )? ";"?`
}
type Exception struct {
Pos lexer.Position
Name string `"exception" @Ident "{"`
Fields []*Field `@@ @@* "}"`
Annotations []*Annotation `( "(" @@ ( "," @@ )* ")" )?`
}
type Struct struct {
Pos lexer.Position
Union bool `( "struct" | @"union" )`
Name string `@Ident "{"`
Fields []*Field `@@* "}"`
Annotations []*Annotation `( "(" @@ ( "," @@ )* ")" )?`
}
type Argument struct {
Pos lexer.Position
ID string `@Number ":"`
Type *Type `@@`
Name string `@Ident`
}
type Throw struct {
Pos lexer.Position
ID string `@Number ":"`
Type *Type `@@`
Name string `@Ident`
}
type Method struct {
Pos lexer.Position
ReturnType *Type `@@`
Name string `@Ident`
Arguments []*Argument `"(" ( @@ ( "," @@ )* )? ")"`
Throws []*Throw `( "throws" "(" @@ ( "," @@ )* ")" )?`
Annotations []*Annotation `( "(" @@ ( "," @@ )* ")" )?`
}
type Service struct {
Pos lexer.Position
Name string `"service" @Ident`
Extends string `( "extends" @Ident ( @"." @Ident )* )?`
Methods []*Method `"{" ( @@ ";"? )* "}"`
Annotations []*Annotation `( "(" @@ ( "," @@ )* ")" )?`
}
// Literal is a "union" type, where only one matching value will be present.
type Literal struct {
Pos lexer.Position
Str *string ` @String`
Number *float64 `| @Number`
Bool *string `| @( "true" | "false" )`
Reference *string `| @Ident ( @"." @Ident )*`
Minus *Literal `| "-" @@`
List []*Literal `| "[" ( @@ ","? )* "]"`
Map []*MapItem `| "{" ( @@ ","? )* "}"`
}
func (l *Literal) GoString() string {
switch {
case l.Str != nil:
return fmt.Sprintf("%q", *l.Str)
case l.Number != nil:
return fmt.Sprintf("%v", *l.Number)
case l.Bool != nil:
return fmt.Sprintf("%v", *l.Bool)
case l.Reference != nil:
return fmt.Sprintf("%s", *l.Reference)
case l.Minus != nil:
return fmt.Sprintf("-%v", l.Minus)
case l.List != nil:
parts := []string{}
for _, e := range l.List {
parts = append(parts, e.GoString())
}
return fmt.Sprintf("[%s]", strings.Join(parts, ", "))
case l.Map != nil:
parts := []string{}
for _, e := range l.Map {
parts = append(parts, e.GoString())
}
return fmt.Sprintf("{%s}", strings.Join(parts, ", "))
}
panic("unsupported?")
}
type MapItem struct {
Pos lexer.Position
Key *Literal `@@ ":"`
Value *Literal `@@`
}
func (m *MapItem) GoString() string {
return fmt.Sprintf("%v: %v", m.Key, m.Value)
}
type Case struct {
Pos lexer.Position
Name string `@Ident`
Annotations []*Annotation `( "(" @@ ( "," @@ )* ")" )?`
Value *Literal `( "=" @@ )? ( "," | ";" )?`
}
type Enum struct {
Pos lexer.Position
Name string `"enum" @Ident "{"`
Cases []*Case `@@* "}"`
Annotations []*Annotation `( "(" @@ ( "," @@ )* ")" )?`
}
type Typedef struct {
Pos lexer.Position
Type *Type `"typedef" @@`
Name string `@Ident`
}
type Const struct {
Pos lexer.Position
Type *Type `"const" @@`
Name string `@Ident`
Value *Literal `"=" @@ ";"?`
}
type Entry struct {
Pos lexer.Position
Includes []string ` "include" @String`
Namespaces []*Namespace `| @@`
Structs []*Struct `| @@`
Exceptions []*Exception `| @@`
Services []*Service `| @@`
Enums []*Enum `| @@`
Typedefs []*Typedef `| @@`
Consts []*Const `| @@`
}
// Thrift files consist of a set of top-level directives and definitions.
//
// The grammar is expressed in the parser struct tags below, and covers the
// subset of Thrift described in the package comment.
type Thrift struct {
Pos lexer.Position
Entries []*Entry `@@*`
}
var (
def = lexer.MustSimple([]lexer.SimpleRule{
{"Number", `\d+`},
{"Ident", `\w+`},
{"String", `"[^"]*"`},
{"Whitespace", `\s+`},
{"Punct", `[,.<>(){}=:]`},
{"Comment", `//.*`},
})
parser = participle.MustBuild[Thrift](
participle.Lexer(def),
participle.Unquote(),
participle.Elide("Whitespace"),
)
)
func main() {
var cli struct {
Gen bool `help:"Generate lexer."`
Files []string `help:"Thrift files."`
}
ctx := kong.Parse(&cli)
for _, file := range cli.Files {
r, err := os.Open(file)
ctx.FatalIfErrorf(err, "")
thrift, err := parser.Parse("", r)
ctx.FatalIfErrorf(err, "")
repr.Println(thrift)
}
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/thrift/main_test.go 0000664 0000000 0000000 00000004341 15053003664 0026635 0 ustar 00root root 0000000 0000000 package main
import (
"strings"
"testing"
"time"
require "github.com/alecthomas/assert/v2"
thriftparser "github.com/alecthomas/go-thrift/parser"
"github.com/alecthomas/participle/v2"
)
var (
source = strings.TrimSpace(`
namespace cpp thrift.example
namespace java thrift.example
enum TweetType {
TWEET
RETWEET = 2
DM = 3
REPLY
}
struct Location {
1: required double latitude
2: required double longitude
}
struct Tweet {
1: required i32 userId
2: required string userName
3: required string text
4: optional Location loc
5: optional TweetType tweetType = TweetType.TWEET
16: optional string language = "english"
}
typedef list<Tweet> TweetList
struct TweetSearchResult {
1: TweetList tweets
}
exception TwitterUnavailable {
1: string message
}
const i32 MAX_RESULTS = 100
service Twitter {
void ping()
bool postTweet(1:Tweet tweet) throws (1:TwitterUnavailable unavailable)
TweetSearchResult searchTweets(1:string query)
void zip()
}
`)
)
func BenchmarkParticipleThrift(b *testing.B) {
_, err := parser.ParseString("", source)
require.NoError(b, err)
b.ResetTimer()
b.ReportAllocs()
start := time.Now()
for i := 0; i < b.N; i++ {
_, _ = parser.ParseString("", source)
}
b.ReportMetric(float64(len(source)*b.N)/time.Since(start).Seconds()/1024/1024, "MiB/s")
}
func BenchmarkParticipleThriftGenerated(b *testing.B) {
parser := participle.MustBuild[Thrift](
participle.Lexer(Lexer),
participle.Unquote(),
participle.Elide("Whitespace"),
)
_, err := parser.ParseString("", source)
require.NoError(b, err)
b.ResetTimer()
b.ReportAllocs()
start := time.Now()
for i := 0; i < b.N; i++ {
_, _ = parser.ParseString("", source)
}
b.ReportMetric(float64(len(source)*b.N)/time.Since(start).Seconds()/1024/1024, "MiB/s")
}
func BenchmarkGoThriftParser(b *testing.B) {
_, err := thriftparser.ParseReader("user.thrift", strings.NewReader(source))
require.NoError(b, err)
b.ResetTimer()
b.ReportAllocs()
start := time.Now()
for i := 0; i < b.N; i++ {
_, _ = thriftparser.ParseReader("user.thrift", strings.NewReader(source))
}
b.ReportMetric(float64(len(source)*b.N)/time.Since(start).Seconds()/1024/1024, "MiB/s")
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/toml/ 0000775 0000000 0000000 00000000000 15053003664 0023774 5 ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/_examples/toml/example.toml 0000664 0000000 0000000 00000001061 15053003664 0026322 0 ustar 00root root 0000000 0000000 # This is a TOML document.
title = "TOML Example"
[owner]
name = "Tom Preston-Werner"
dob = 1979-05-27T07:32:00-08:00 # First class dates
[database]
server = "192.168.1.1"
ports = [ 8001, 8001, 8002 ]
connection_max = 5000
enabled = true
[servers]
# Indentation (tabs and/or spaces) is allowed but not required
[servers.alpha]
ip = "10.0.0.1"
dc = "eqdc10"
[servers.beta]
ip = "10.0.0.2"
dc = "eqdc10"
[clients]
data = [ ["gamma", "delta"], [1, 2] ]
# Line breaks are OK when inside arrays
hosts = [
"alpha",
"omega"
]
golang-github-alecthomas-participle-v2-2.1.4/_examples/toml/main.go 0000664 0000000 0000000 00000002761 15053003664 0025255 0 ustar 00root root 0000000 0000000 package main
import (
"os"
"github.com/alecthomas/kong"
"github.com/alecthomas/repr"
"github.com/alecthomas/participle/v2"
"github.com/alecthomas/participle/v2/lexer"
)
type TOML struct {
Pos lexer.Position
Entries []*Entry `@@*`
}
type Entry struct {
Field *Field ` @@`
Section *Section `| @@`
}
type Field struct {
Key string `@Ident "="`
Value *Value `@@`
}
type Value struct {
String *string ` @String`
DateTime *string `| @DateTime`
Date *string `| @Date`
Time *string `| @Time`
Bool *bool `| @("true" | "false")`
Number *float64 `| @Number`
List []*Value `| "[" ( @@ ( "," @@ )* )? "]"`
}
type Section struct {
Name string `"[" @(Ident ( "." Ident )*) "]"`
Fields []*Field `@@*`
}
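// Rule order matters below: lexer.MustSimple tries rules in the order given,
// so the longer DateTime pattern must precede Date and Time or a full
// timestamp would lex as a bare Date followed by garbage.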
var (
tomlLexer = lexer.MustSimple([]lexer.SimpleRule{
{"DateTime", `\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d+)?(-\d\d:\d\d)?`},
{"Date", `\d\d\d\d-\d\d-\d\d`},
{"Time", `\d\d:\d\d:\d\d(\.\d+)?`},
{"Ident", `[a-zA-Z_][a-zA-Z_0-9]*`},
{"String", `"[^"]*"`},
{"Number", `[-+]?[.0-9]+\b`},
{"Punct", `\[|]|[-!()+/*=,]`},
{"comment", `#[^\n]+`},
{"whitespace", `\s+`},
})
tomlParser = participle.MustBuild[TOML](
participle.Lexer(tomlLexer),
participle.Unquote("String"),
)
cli struct {
File string `help:"TOML file to parse." arg:""`
}
)
func main() {
ctx := kong.Parse(&cli)
r, err := os.Open(cli.File)
ctx.FatalIfErrorf(err)
defer r.Close()
toml, err := tomlParser.Parse(cli.File, r)
ctx.FatalIfErrorf(err)
repr.Println(toml)
}
golang-github-alecthomas-participle-v2-2.1.4/_examples/toml/main_test.go 0000664 0000000 0000000 00000001432 15053003664 0026306 0 ustar 00root root 0000000 0000000 package main
import (
"testing"
require "github.com/alecthomas/assert/v2"
"github.com/alecthomas/repr"
)
func TestExe(t *testing.T) {
toml, err := tomlParser.ParseString("", `
# This is a TOML document.
title = "TOML Example"
[owner]
name = "Tom Preston-Werner"
dob = 1979-05-27T07:32:00-08:00 # First class dates
[database]
server = "192.168.1.1"
ports = [ 8001, 8001, 8002 ]
connection_max = 5000
enabled = true
[servers]
# Indentation (tabs and/or spaces) is allowed but not required
[servers.alpha]
ip = "10.0.0.1"
dc = "eqdc10"
[servers.beta]
ip = "10.0.0.2"
dc = "eqdc10"
[clients]
data = [ ["gamma", "delta"], [1, 2] ]
# Line breaks are OK when inside arrays
hosts = [
"alpha",
"omega"
]
`)
require.NoError(t, err)
repr.Println(toml)
}
golang-github-alecthomas-participle-v2-2.1.4/api.go 0000664 0000000 0000000 00000001071 15053003664 0022143 0 ustar 00root root 0000000 0000000 package participle
import (
"github.com/alecthomas/participle/v2/lexer"
)
// Capture can be implemented by fields in order to transform captured tokens into field values.
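//
// A minimal sketch (adapted from the SQL example in _examples/sql, and
// assuming a "strings" import) that normalises a keyword token into a bool:
//
//	type Boolean bool
//
//	func (b *Boolean) Capture(values []string) error {
//		*b = strings.EqualFold(values[0], "TRUE")
//		return nil
//	}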
type Capture interface {
Capture(values []string) error
}
// The Parseable interface can be implemented by any element in the grammar to provide custom parsing.
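//
// A minimal sketch (adapted from the protobuf example in _examples/protobuf,
// which matches scalar type names against a lookup table):
//
//	func (s *Scalar) Parse(lex *lexer.PeekingLexer) error {
//		v, ok := stringToScalar[lex.Peek().Value]
//		if !ok {
//			return participle.NextMatch
//		}
//		lex.Next()
//		*s = v
//		return nil
//	}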
type Parseable interface {
// Parse into the receiver.
//
// Should return NextMatch if no tokens matched and parsing should continue.
// Nil should be returned if parsing was successful.
Parse(lex *lexer.PeekingLexer) error
}
golang-github-alecthomas-participle-v2-2.1.4/bin/ 0000775 0000000 0000000 00000000000 15053003664 0021614 5 ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/bin/.go-1.23.5.pkg 0000777 0000000 0000000 00000000000 15053003664 0024734 2hermit ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/bin/.golangci-lint-1.63.4.pkg 0000777 0000000 0000000 00000000000 15053003664 0027061 2hermit ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/bin/.goreleaser-1.26.2.pkg 0000777 0000000 0000000 00000000000 15053003664 0026457 2hermit ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/bin/README.hermit.md 0000664 0000000 0000000 00000000416 15053003664 0024363 0 ustar 00root root 0000000 0000000 # Hermit environment
This is a [Hermit](https://github.com/cashapp/hermit) bin directory.
The symlinks in this directory are managed by Hermit and will automatically
download and install Hermit itself as well as packages. These packages are
local to this environment.
golang-github-alecthomas-participle-v2-2.1.4/bin/activate-hermit 0000775 0000000 0000000 00000001062 15053003664 0024627 0 ustar 00root root 0000000 0000000 #!/bin/bash
# This file must be used with "source bin/activate-hermit" from bash or zsh.
# You cannot run it directly
if [ "${BASH_SOURCE-}" = "$0" ]; then
echo "You must source this script: \$ source $0" >&2
exit 33
fi
BIN_DIR="$(dirname "${BASH_SOURCE[0]:-${(%):-%x}}")"
if "${BIN_DIR}/hermit" noop > /dev/null; then
eval "$("${BIN_DIR}/hermit" activate "${BIN_DIR}/..")"
if [ -n "${BASH-}" ] || [ -n "${ZSH_VERSION-}" ]; then
hash -r 2>/dev/null
fi
echo "Hermit environment $("${HERMIT_ENV}"/bin/hermit env HERMIT_ENV) activated"
fi
golang-github-alecthomas-participle-v2-2.1.4/bin/go 0000777 0000000 0000000 00000000000 15053003664 0024051 2.go-1.23.5.pkg ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/bin/gofmt 0000777 0000000 0000000 00000000000 15053003664 0024560 2.go-1.23.5.pkg ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/bin/golangci-lint 0000777 0000000 0000000 00000000000 15053003664 0030320 2.golangci-lint-1.63.4.pkg ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/bin/goreleaser 0000777 0000000 0000000 00000000000 15053003664 0027317 2.goreleaser-1.26.2.pkg ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/bin/hermit 0000775 0000000 0000000 00000001361 15053003664 0023033 0 ustar 00root root 0000000 0000000 #!/bin/bash
set -eo pipefail
if [ -z "${HERMIT_STATE_DIR}" ]; then
case "$(uname -s)" in
Darwin)
export HERMIT_STATE_DIR="${HOME}/Library/Caches/hermit"
;;
Linux)
export HERMIT_STATE_DIR="${XDG_CACHE_HOME:-${HOME}/.cache}/hermit"
;;
esac
fi
export HERMIT_DIST_URL="${HERMIT_DIST_URL:-https://github.com/cashapp/hermit/releases/download/stable}"
HERMIT_CHANNEL="$(basename "${HERMIT_DIST_URL}")"
export HERMIT_CHANNEL
export HERMIT_EXE=${HERMIT_EXE:-${HERMIT_STATE_DIR}/pkg/hermit@${HERMIT_CHANNEL}/hermit}
if [ ! -x "${HERMIT_EXE}" ]; then
echo "Bootstrapping ${HERMIT_EXE} from ${HERMIT_DIST_URL}" 1>&2
curl -fsSL "${HERMIT_DIST_URL}/install.sh" | /bin/bash 1>&2
fi
exec "${HERMIT_EXE}" --level=fatal exec "$0" -- "$@"
golang-github-alecthomas-participle-v2-2.1.4/bin/hermit.hcl 0000664 0000000 0000000 00000000065 15053003664 0023575 0 ustar 00root root 0000000 0000000 env = {
"PATH": "${HERMIT_ENV}/scripts:${PATH}",
}
golang-github-alecthomas-participle-v2-2.1.4/cmd/ 0000775 0000000 0000000 00000000000 15053003664 0021607 5 ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/cmd/participle/ 0000775 0000000 0000000 00000000000 15053003664 0023743 5 ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/cmd/participle/codegen.go.tmpl 0000664 0000000 0000000 00000006031 15053003664 0026651 0 ustar 00root root 0000000 0000000 // Code generated by Participle. DO NOT EDIT.
{{if .Tags}}//go:build {{.Tags}}
{{end -}}
package {{.Package}}
import (
"fmt"
"io"
"strings"
"sync"
"unicode/utf8"
"regexp/syntax"
"github.com/alecthomas/participle/v2"
"github.com/alecthomas/participle/v2/lexer"
)
var _ syntax.Op
var _ fmt.State
const _ = utf8.RuneError
var {{.Name}}BackRefCache sync.Map
var {{.Name}}Lexer lexer.Definition = lexer{{.Name}}DefinitionImpl{}
type lexer{{.Name}}DefinitionImpl struct {}
func (lexer{{.Name}}DefinitionImpl) Symbols() map[string]lexer.TokenType {
return map[string]lexer.TokenType{
{{- range $sym, $rn := .Def.Symbols}}
"{{$sym}}": {{$rn}},
{{- end}}
}
}
func (lexer{{.Name}}DefinitionImpl) LexString(filename string, s string) (lexer.Lexer, error) {
return &lexer{{.Name}}Impl{
s: s,
pos: lexer.Position{
Filename: filename,
Line: 1,
Column: 1,
},
states: []lexer{{.Name}}State{ {name: "Root"} },
}, nil
}
func (d lexer{{.Name}}DefinitionImpl) LexBytes(filename string, b []byte) (lexer.Lexer, error) {
return d.LexString(filename, string(b))
}
func (d lexer{{.Name}}DefinitionImpl) Lex(filename string, r io.Reader) (lexer.Lexer, error) {
s := &strings.Builder{}
_, err := io.Copy(s, r)
if err != nil {
return nil, err
}
return d.LexString(filename, s.String())
}
type lexer{{.Name}}State struct {
name string
groups []string
}
type lexer{{.Name}}Impl struct {
s string
p int
pos lexer.Position
states []lexer{{.Name}}State
}
func (l *lexer{{.Name}}Impl) Next() (lexer.Token, error) {
if l.p == len(l.s) {
return lexer.EOFToken(l.pos), nil
}
var (
state = l.states[len(l.states)-1]
groups []int
sym lexer.TokenType
)
switch state.name {
{{- range $state := .Def.Rules|OrderRules}}
case "{{$state.Name}}":
{{- range $i, $rule := $state.Rules}}
{{- if $i}} else {{end -}}
{{- if .Pattern -}}
if match := match{{$.Name}}{{.Name}}(l.s, l.p, l.states[len(l.states)-1].groups); match[1] != 0 {
sym = {{index $.Def.Symbols .Name}}
groups = match[:]
{{- else if .|IsReturn -}}
if true {
{{- end}}
{{- if .|IsPush}}
l.states = append(l.states, lexer{{$.Name}}State{name: "{{.|IsPush}}"{{if HaveBackrefs $.Def $state.Name}}, groups: l.sgroups(groups){{end}}})
{{- else if (or (.|IsPop) (.|IsReturn))}}
l.states = l.states[:len(l.states)-1]
{{- if .|IsReturn}}
return l.Next()
{{- end}}
{{- else if not .Action}}
{{- else}}
Unsupported action {{.Action}}
{{- end}}
}
{{- end}}
{{- end}}
}
if groups == nil {
sample := []rune(l.s[l.p:])
if len(sample) > 16 {
sample = append(sample[:16], []rune("...")...)
}
return lexer.Token{}, participle.Errorf(l.pos, "invalid input text %q", string(sample))
}
pos := l.pos
span := l.s[groups[0]:groups[1]]
l.p = groups[1]
l.pos.Advance(span)
return lexer.Token{
Type: sym,
Value: span,
Pos: pos,
}, nil
}
func (l *lexer{{.Name}}Impl) sgroups(match []int) []string {
sgroups := make([]string, len(match)/2)
for i := 0; i < len(match)-1; i += 2 {
sgroups[i/2] = l.s[l.p+match[i]:l.p+match[i+1]]
}
return sgroups
} golang-github-alecthomas-participle-v2-2.1.4/cmd/participle/gen_lexer_cmd.go 0000664 0000000 0000000 00000027771 15053003664 0027103 0 ustar 00root root 0000000 0000000 package main
import (
_ "embed" // For go:embed.
"encoding/json"
"fmt"
"io"
"os"
"regexp"
"regexp/syntax"
"sort"
"text/template"
"unicode"
"unicode/utf8"
"github.com/alecthomas/participle/v2/lexer"
)
type genLexerCmd struct {
Name string `help:"Name of the lexer."`
Output string `short:"o" help:"Output file."`
Tags string `help:"Build tags to include in the generated file."`
Package string `arg:"" required:"" help:"Go package for generated code."`
Lexer *os.File `arg:"" default:"-" help:"JSON representation of a Participle lexer (read from stdin if omitted)."`
}
func (c *genLexerCmd) Help() string {
return `
Generates Go code implementing the given JSON representation of a lexer. The
generated code should in general be around 10x faster and produce zero garbage
per token.
`
}
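// Run decodes the lexer JSON into lexer.Rules, builds the stateful lexer
// definition and writes the generated source. An illustrative invocation:
//
//	participle gen lexer --name My --output mylexer_gen.go mypkg lexer.json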
func (c *genLexerCmd) Run() error {
rules := lexer.Rules{}
err := json.NewDecoder(c.Lexer).Decode(&rules)
if err != nil {
return err
}
def, err := lexer.New(rules)
if err != nil {
return err
}
out := os.Stdout
if c.Output != "" {
out, err = os.Create(c.Output)
if err != nil {
return err
}
defer out.Close()
}
err = generateLexer(out, c.Package, def, c.Name, c.Tags)
if err != nil {
return err
}
return nil
}
var (
//go:embed codegen.go.tmpl
codegenTemplateSource string
codegenBackrefRe = regexp.MustCompile(`(\\+)(\d)`)
codegenTemplate = template.Must(template.New("lexgen").Funcs(template.FuncMap{
"IsPush": func(r lexer.Rule) string {
if p, ok := r.Action.(lexer.ActionPush); ok {
return p.State
}
return ""
},
"IsPop": func(r lexer.Rule) bool {
_, ok := r.Action.(lexer.ActionPop)
return ok
},
"IsReturn": func(r lexer.Rule) bool {
return r == lexer.ReturnRule
},
"OrderRules": orderRules,
"HaveBackrefs": func(def *lexer.StatefulDefinition, state string) bool {
for _, rule := range def.Rules()[state] {
if codegenBackrefRe.MatchString(rule.Pattern) {
return true
}
}
return false
},
}).Parse(codegenTemplateSource))
)
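// generateLexer renders the codegen template for the lexer definition, then
// emits one match function per rule, skipping duplicates introduced by
// Include().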
func generateLexer(w io.Writer, pkg string, def *lexer.StatefulDefinition, name, tags string) error {
type ctx struct {
Package string
Name string
Tags string
Def *lexer.StatefulDefinition
}
rules := def.Rules()
err := codegenTemplate.Execute(w, ctx{pkg, name, tags, def})
if err != nil {
return err
}
seen := map[string]bool{} // Rules can be duplicated by Include().
for _, rules := range orderRules(rules) {
for _, rule := range rules.Rules {
if rule.Name == "" {
panic(rule)
}
if seen[rule.Name] {
continue
}
seen[rule.Name] = true
fmt.Fprintf(w, "\n")
err := generateRegexMatch(w, name, rule.Name, rule.Pattern)
if err != nil {
return err
}
}
}
return nil
}
type orderedRule struct {
Name string
Rules []lexer.Rule
}
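// orderRules sorts the lexer states by name so that the generated output is
// deterministic; lexer.Rules is a map, and Go randomises map iteration order.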
func orderRules(rules lexer.Rules) []orderedRule {
orderedRules := []orderedRule{}
for name, rules := range rules {
orderedRules = append(orderedRules, orderedRule{
Name: name,
Rules: rules,
})
}
sort.Slice(orderedRules, func(i, j int) bool {
return orderedRules[i].Name < orderedRules[j].Name
})
return orderedRules
}
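// generateRegexMatch parses the rule's pattern with regexp/syntax and emits a
// hand-rolled matcher: each node of the simplified AST becomes a small inline
// function, composed bottom-up. Patterns containing backreferences instead
// delegate to a cached regexp at runtime via lexer.BackrefRegex.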
func generateRegexMatch(w io.Writer, lexerName, name, pattern string) error {
if codegenBackrefRe.FindStringIndex(pattern) != nil {
fmt.Fprintf(w, "func match%s%s(s string, p int, backrefs []string) (groups []int) {\n", lexerName, name)
fmt.Fprintf(w, " re, err := lexer.BackrefRegex(%sBackRefCache, %q, backrefs)\n", lexerName, pattern)
fmt.Fprintf(w, " if err != nil { panic(fmt.Sprintf(\"%%s: %%s\", err, backrefs)) }\n")
fmt.Fprintf(w, " return re.FindStringSubmatchIndex(s[p:])\n")
fmt.Fprintf(w, "}\n")
return nil
}
re, err := syntax.Parse(pattern, syntax.Perl)
if err != nil {
return err
}
ids := map[string]int{}
idn := 0
reid := func(re *syntax.Regexp) int {
key := re.Op.String() + ":" + re.String()
id, ok := ids[key]
if ok {
return id
}
id = idn
idn++
ids[key] = id
return id
}
exists := func(re *syntax.Regexp) bool {
key := re.Op.String() + ":" + re.String()
_, ok := ids[key]
return ok
}
re = re.Simplify()
fmt.Fprintf(w, "// %s\n", re)
fmt.Fprintf(w, "func match%s%s(s string, p int, backrefs []string) (groups [%d]int) {\n", lexerName, name, 2*re.MaxCap()+2)
flattened := flatten(re)
// Fast-path a single literal.
if len(flattened) == 1 && re.Op == syntax.OpLiteral {
n := len(string(re.Rune)) // Byte length of the literal.
if re.Flags&syntax.FoldCase != 0 {
fmt.Fprintf(w, "if p+%d <= len(s) && strings.EqualFold(s[p:p+%d], %q) {\n", n, n, string(re.Rune))
} else {
if n == 1 {
fmt.Fprintf(w, "if p < len(s) && s[p] == %q {\n", re.Rune[0])
} else {
fmt.Fprintf(w, "if p+%d <= len(s) && s[p:p+%d] == %q {\n", n, n, string(re.Rune))
}
}
fmt.Fprintf(w, "groups[0] = p\n")
fmt.Fprintf(w, "groups[1] = p + %d\n", n)
fmt.Fprintf(w, "}\n")
fmt.Fprintf(w, "return\n")
fmt.Fprintf(w, "}\n")
return nil
}
for _, re := range flattened {
if exists(re) {
continue
}
fmt.Fprintf(w, "// %s (%s)\n", re, re.Op)
fmt.Fprintf(w, "l%d := func(s string, p int) int {\n", reid(re))
if re.Flags&syntax.NonGreedy != 0 {
panic("non-greedy match not supported: " + re.String())
}
switch re.Op {
case syntax.OpNoMatch: // matches no strings
fmt.Fprintf(w, "return p\n")
case syntax.OpEmptyMatch: // matches empty string
fmt.Fprintf(w, "if len(s) == 0 { return p }\n")
fmt.Fprintf(w, "return -1\n")
case syntax.OpLiteral: // matches Runes sequence
n := len(string(re.Rune)) // Byte length of the literal.
if re.Flags&syntax.FoldCase != 0 {
if n == 1 && !unicode.IsLetter(re.Rune[0]) {
fmt.Fprintf(w, "if p < len(s) && s[p] == %q { return p+1 }\n", re.Rune[0])
} else {
fmt.Fprintf(w, "if p+%d <= len(s) && strings.EqualFold(s[p:p+%d], %q) { return p+%d }\n", n, n, string(re.Rune), n)
}
} else {
if n == 1 {
fmt.Fprintf(w, "if p < len(s) && s[p] == %q { return p+1 }\n", re.Rune[0])
} else {
fmt.Fprintf(w, "if p+%d <= len(s) && s[p:p+%d] == %q { return p+%d }\n", n, n, string(re.Rune), n)
}
}
fmt.Fprintf(w, "return -1\n")
case syntax.OpCharClass: // matches Runes interpreted as range pair list
fmt.Fprintf(w, "if len(s) <= p { return -1 }\n")
needDecode := false
asciiSet := true
for i := 0; i < len(re.Rune); i += 2 {
l, r := re.Rune[i], re.Rune[i+1]
ln, rn := utf8.RuneLen(l), utf8.RuneLen(r)
if ln != 1 || rn != 1 {
needDecode = true
}
if l > 0x7f || r > 0x7f || l != r {
asciiSet = false
}
}
if needDecode {
fmt.Fprintf(w, "var (rn rune; n int)\n")
decodeRune(w, "p", "rn", "n")
} else {
fmt.Fprintf(w, "rn := s[p]\n")
}
if asciiSet {
if len(re.Rune) == 2 {
fmt.Fprintf(w, "if rn == %q { return p+1 }\n", re.Rune[0])
} else if len(re.Rune) == 4 {
fmt.Fprintf(w, "if rn == %q || rn == %q { return p+1 }\n", re.Rune[0], re.Rune[2])
} else {
fmt.Fprintf(w, "switch rn {\n")
fmt.Fprintf(w, "case ")
for i := 0; i < len(re.Rune); i += 2 {
if i != 0 {
fmt.Fprintf(w, ",")
}
fmt.Fprintf(w, "%q", re.Rune[i])
}
fmt.Fprintf(w, ": return p+1\n")
fmt.Fprintf(w, "}\n")
}
} else {
fmt.Fprintf(w, "switch {\n")
for i := 0; i < len(re.Rune); i += 2 {
l, r := re.Rune[i], re.Rune[i+1]
ln, rn := utf8.RuneLen(l), utf8.RuneLen(r)
if ln == 1 && rn == 1 {
if l == r {
fmt.Fprintf(w, "case rn == %q: return p+1\n", l)
} else {
fmt.Fprintf(w, "case rn >= %q && rn <= %q: return p+1\n", l, r)
}
} else {
if l == r {
fmt.Fprintf(w, "case rn == %q: return p+n\n", l)
} else {
fmt.Fprintf(w, "case rn >= %q && rn <= %q: return p+n\n", l, r)
}
}
}
fmt.Fprintf(w, "}\n")
}
fmt.Fprintf(w, "return -1\n")
case syntax.OpAnyCharNotNL: // matches any character except newline
fmt.Fprintf(w, "var (rn rune; n int)\n")
decodeRune(w, "p", "rn", "n")
fmt.Fprintf(w, "if len(s) <= p+n || rn == '\\n' { return -1 }\n")
fmt.Fprintf(w, "return p+n\n")
case syntax.OpAnyChar: // matches any character
fmt.Fprintf(w, "var n int\n")
fmt.Fprintf(w, "if s[p] < utf8.RuneSelf {\n")
fmt.Fprintf(w, " n = 1\n")
fmt.Fprintf(w, "} else {\n")
fmt.Fprintf(w, " _, n = utf8.DecodeRuneInString(s[p:])\n")
fmt.Fprintf(w, "}\n")
fmt.Fprintf(w, "if len(s) <= p+n { return -1 }\n")
fmt.Fprintf(w, "return p+n\n")
case syntax.OpWordBoundary, syntax.OpNoWordBoundary,
syntax.OpBeginText, syntax.OpEndText,
syntax.OpBeginLine, syntax.OpEndLine:
fmt.Fprintf(w, "var l, u rune = -1, -1\n")
fmt.Fprintf(w, "if p == 0 {\n")
fmt.Fprintf(w, " if p < len(s) {\n")
decodeRune(w, "0", "u", "_")
fmt.Fprintf(w, " }\n")
fmt.Fprintf(w, "} else if p == len(s) {\n")
fmt.Fprintf(w, " l, _ = utf8.DecodeLastRuneInString(s)\n")
fmt.Fprintf(w, "} else {\n")
fmt.Fprintf(w, " l, _ = utf8.DecodeLastRuneInString(s[0:p])\n")
decodeRune(w, "p", "u", "_")
fmt.Fprintf(w, "}\n")
fmt.Fprintf(w, "op := syntax.EmptyOpContext(l, u)\n")
lut := map[syntax.Op]string{
syntax.OpWordBoundary: "EmptyWordBoundary",
syntax.OpNoWordBoundary: "EmptyNoWordBoundary",
syntax.OpBeginText: "EmptyBeginText",
syntax.OpEndText: "EmptyEndText",
syntax.OpBeginLine: "EmptyBeginLine",
syntax.OpEndLine: "EmptyEndLine",
}
fmt.Fprintf(w, "if op & syntax.%s != 0 { return p }\n", lut[re.Op])
fmt.Fprintf(w, "return -1\n")
case syntax.OpCapture: // capturing subexpression with index Cap, optional name Name
fmt.Fprintf(w, "np := l%d(s, p)\n", reid(re.Sub0[0]))
fmt.Fprintf(w, "if np != -1 {\n")
fmt.Fprintf(w, " groups[%d] = p\n", re.Cap*2)
fmt.Fprintf(w, " groups[%d] = np\n", re.Cap*2+1)
fmt.Fprintf(w, "}\n")
fmt.Fprintf(w, "return np")
case syntax.OpStar: // matches Sub[0] zero or more times
fmt.Fprintf(w, "for len(s) > p {\n")
fmt.Fprintf(w, "if np := l%d(s, p); np == -1 { return p } else { p = np }\n", reid(re.Sub0[0]))
fmt.Fprintf(w, "}\n")
fmt.Fprintf(w, "return p\n")
case syntax.OpPlus: // matches Sub[0] one or more times
fmt.Fprintf(w, "if p = l%d(s, p); p == -1 { return -1 }\n", reid(re.Sub0[0]))
fmt.Fprintf(w, "for len(s) > p {\n")
fmt.Fprintf(w, "if np := l%d(s, p); np == -1 { return p } else { p = np }\n", reid(re.Sub0[0]))
fmt.Fprintf(w, "}\n")
fmt.Fprintf(w, "return p\n")
case syntax.OpQuest: // matches Sub[0] zero or one times
fmt.Fprintf(w, "if np := l%d(s, p); np != -1 { return np }\n", reid(re.Sub0[0]))
fmt.Fprintf(w, "return p\n")
case syntax.OpRepeat: // matches Sub[0] at least Min times, at most Max (Max == -1 is no limit)
panic("??")
case syntax.OpConcat: // matches concatenation of Subs
for _, sub := range re.Sub {
fmt.Fprintf(w, "if p = l%d(s, p); p == -1 { return -1 }\n", reid(sub))
}
fmt.Fprintf(w, "return p\n")
case syntax.OpAlternate: // matches alternation of Subs
for _, sub := range re.Sub {
fmt.Fprintf(w, "if np := l%d(s, p); np != -1 { return np }\n", reid(sub))
}
fmt.Fprintf(w, "return -1\n")
}
fmt.Fprintf(w, "}\n")
}
fmt.Fprintf(w, "np := l%d(s, p)\n", reid(re))
fmt.Fprintf(w, "if np == -1 {\n")
fmt.Fprintf(w, " return\n")
fmt.Fprintf(w, "}\n")
fmt.Fprintf(w, "groups[0] = p\n")
fmt.Fprintf(w, "groups[1] = np\n")
fmt.Fprintf(w, "return\n")
fmt.Fprintf(w, "}\n")
return nil
}
// This exists because of https://github.com/golang/go/issues/31666
func decodeRune(w io.Writer, offset string, rn string, n string) {
fmt.Fprintf(w, "if s[%s] < utf8.RuneSelf {\n", offset)
fmt.Fprintf(w, " %s, %s = rune(s[%s]), 1\n", rn, n, offset)
fmt.Fprintf(w, "} else {\n")
fmt.Fprintf(w, " %s, %s = utf8.DecodeRuneInString(s[%s:])\n", rn, n, offset)
fmt.Fprintf(w, "}\n")
}
func flatten(re *syntax.Regexp) (out []*syntax.Regexp) {
for _, sub := range re.Sub {
out = append(out, flatten(sub)...)
}
out = append(out, re)
return
}
func isSimpleRuneRange(runes []rune) bool {
for i := 0; i < len(runes); i += 2 {
if runes[i] != runes[i+1] || utf8.RuneLen(runes[i]) != 1 {
return false
}
}
return true
}
golang-github-alecthomas-participle-v2-2.1.4/cmd/participle/go.mod 0000664 0000000 0000000 00000000325 15053003664 0025051 0 ustar 00root root 0000000 0000000 module github.com/alecthomas/participle/v2/cmd/participle
go 1.18
require (
github.com/alecthomas/kong v1.6.1
github.com/alecthomas/participle/v2 v2.1.1
)
replace github.com/alecthomas/participle/v2 => ../..
golang-github-alecthomas-participle-v2-2.1.4/cmd/participle/go.sum 0000664 0000000 0000000 00000004460 15053003664 0025102 0 ustar 00root root 0000000 0000000 github.com/alecthomas/assert/v2 v2.1.0 h1:tbredtNcQnoSd3QBhQWI7QZ3XHOVkw1Moklp2ojoH/0=
github.com/alecthomas/kong v0.6.1 h1:1kNhcFepkR+HmasQpbiKDLylIL8yh5B5y1zPp5bJimA=
github.com/alecthomas/kong v0.6.1/go.mod h1:JfHWDzLmbh/puW6I3V7uWenoh56YNVONW+w8eKeUr9I=
github.com/alecthomas/kong v0.7.1 h1:azoTh0IOfwlAX3qN9sHWTxACE2oV8Bg2gAwBsMwDQY4=
github.com/alecthomas/kong v0.7.1/go.mod h1:n1iCIO2xS46oE8ZfYCNDqdR0b0wZNrXAIAqro/2132U=
github.com/alecthomas/kong v0.8.0 h1:ryDCzutfIqJPnNn0omnrgHLbAggDQM2VWHikE1xqK7s=
github.com/alecthomas/kong v0.8.0/go.mod h1:n1iCIO2xS46oE8ZfYCNDqdR0b0wZNrXAIAqro/2132U=
github.com/alecthomas/kong v0.8.1 h1:acZdn3m4lLRobeh3Zi2S2EpnXTd1mOL6U7xVml+vfkY=
github.com/alecthomas/kong v0.8.1/go.mod h1:n1iCIO2xS46oE8ZfYCNDqdR0b0wZNrXAIAqro/2132U=
github.com/alecthomas/kong v1.2.1 h1:E8jH4Tsgv6wCRX2nGrdPyHDUCSG83WH2qE4XLACD33Q=
github.com/alecthomas/kong v1.2.1/go.mod h1:rKTSFhbdp3Ryefn8x5MOEprnRFQ7nlmMC01GKhehhBM=
github.com/alecthomas/kong v1.6.1 h1:/7bVimARU3uxPD0hbryPE8qWrS3Oz3kPQoxA/H2NKG8=
github.com/alecthomas/kong v1.6.1/go.mod h1:p2vqieVMeTAnaC83txKtXe8FLke2X07aruPWXyMPQrU=
github.com/alecthomas/repr v0.0.0-20210801044451-80ca428c5142/go.mod h1:2kn6fqh/zIyPLmm3ugklbEi5hg5wS435eygvNfaDQL8=
github.com/alecthomas/repr v0.1.0 h1:ENn2e1+J3k09gyj2shc0dHr/yjaWSHRlrJ4DPMevDqE=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
golang-github-alecthomas-participle-v2-2.1.4/cmd/participle/main.go 0000664 0000000 0000000 00000000671 15053003664 0025222 0 ustar 00root root 0000000 0000000 package main
import "github.com/alecthomas/kong"
var (
version string = "dev"
cli struct {
Version kong.VersionFlag
Gen struct {
Lexer genLexerCmd `cmd:"" help:"Generate a lexer."`
} `cmd:"" help:"Generate code to accelerate Participle."`
}
)
func main() {
kctx := kong.Parse(&cli,
kong.Description(`A command-line tool for Participle.`),
kong.Vars{"version": version},
)
err := kctx.Run()
kctx.FatalIfErrorf(err)
}
golang-github-alecthomas-participle-v2-2.1.4/cmd/railroad/ 0000775 0000000 0000000 00000000000 15053003664 0023404 5 ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/cmd/railroad/assets/ 0000775 0000000 0000000 00000000000 15053003664 0024706 5 ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/cmd/railroad/assets/railroad-diagrams.css 0000664 0000000 0000000 00000001747 15053003664 0031013 0 ustar 00root root 0000000 0000000 svg.railroad-diagram {
background-color: hsl(30,20%,95%);
}
svg.railroad-diagram path {
stroke-width: 3;
stroke: black;
fill: rgba(0,0,0,0);
}
svg.railroad-diagram text {
font: bold 14px monospace;
text-anchor: middle;
white-space: pre;
}
svg.railroad-diagram text.diagram-text {
font-size: 12px;
}
svg.railroad-diagram text.diagram-arrow {
font-size: 16px;
}
svg.railroad-diagram text.label {
text-anchor: start;
}
svg.railroad-diagram text.comment {
font: italic 12px monospace;
}
svg.railroad-diagram g.non-terminal text {
/*font-style: italic;*/
}
svg.railroad-diagram rect {
stroke-width: 3;
stroke: black;
fill: hsl(120,100%,90%);
}
svg.railroad-diagram rect.group-box {
stroke: gray;
stroke-dasharray: 10 5;
fill: none;
}
svg.railroad-diagram path.diagram-text {
stroke-width: 3;
stroke: black;
fill: white;
cursor: help;
}
svg.railroad-diagram g.diagram-text:hover path.diagram-text {
fill: #eee;
}
golang-github-alecthomas-participle-v2-2.1.4/cmd/railroad/assets/railroad-diagrams.js 0000664 0000000 0000000 00000126163 15053003664 0030637 0 ustar 00root root 0000000 0000000 "use strict";
/*
Railroad Diagrams
by Tab Atkins Jr. (and others)
http://xanthir.com
http://twitter.com/tabatkins
http://github.com/tabatkins/railroad-diagrams
This document and all associated files in the github project are licensed under CC0: http://creativecommons.org/publicdomain/zero/1.0/
This means you can reuse, remix, or otherwise appropriate this project for your own use WITHOUT RESTRICTION.
(The actual legal meaning can be found at the above link.)
Don't ask me for permission to use any part of this project, JUST USE IT.
I would appreciate attribution, but that is not required by the license.
*/
/*
This file uses a module pattern to avoid leaking names into the global scope.
Should be compatible with AMD, CommonJS, and plain ol' browser JS.
As well, several configuration constants are passed into the module function at the bottom of this file.
At runtime, these constants can be found on the Diagram class,
and can be changed before creating a Diagram.
*/
(function(options) {
var funcs = {};
function subclassOf(baseClass, superClass) {
baseClass.prototype = Object.create(superClass.prototype);
baseClass.prototype.$super = superClass.prototype;
}
function unnull(/* children */) {
return [].slice.call(arguments).reduce(function(sofar, x) { return sofar !== undefined ? sofar : x; });
}
function determineGaps(outer, inner) {
var diff = outer - inner;
switch(Diagram.INTERNAL_ALIGNMENT) {
case 'left': return [0, diff]; break;
case 'right': return [diff, 0]; break;
case 'center':
default: return [diff/2, diff/2]; break;
}
}
function wrapString(value) {
return value instanceof FakeSVG ? value : new Terminal(""+value);
}
function sum(iter, func) {
if(!func) func = function(x) { return x; };
return iter.map(func).reduce(function(a,b){return a+b}, 0);
}
function max(iter, func) {
if(!func) func = function(x) { return x; };
return Math.max.apply(null, iter.map(func));
}
function* enumerate(iter) {
var count = 0;
for(const x of iter) {
yield [count, x];
count++;
}
}
var SVG = funcs.SVG = function SVG(name, attrs, text) {
attrs = attrs || {};
text = text || '';
var el = document.createElementNS("http://www.w3.org/2000/svg",name);
for(var attr in attrs) {
if(attr === 'xlink:href')
el.setAttributeNS("http://www.w3.org/1999/xlink", 'href', attrs[attr]);
else
el.setAttribute(attr, attrs[attr]);
}
el.textContent = text;
return el;
}
var FakeSVG = funcs.FakeSVG = function FakeSVG(tagName, attrs, text){
if(!(this instanceof FakeSVG)) return new FakeSVG(tagName, attrs, text);
if(text) this.children = text;
else this.children = [];
this.tagName = tagName;
this.attrs = unnull(attrs, {});
return this;
};
FakeSVG.prototype.format = function(x, y, width) {
// Virtual
};
FakeSVG.prototype.addTo = function(parent) {
if(parent instanceof FakeSVG) {
parent.children.push(this);
return this;
} else {
var svg = this.toSVG();
parent.appendChild(svg);
return svg;
}
};
FakeSVG.prototype.escapeString = function(string) {
// Escape markdown and HTML special characters
return string.replace(/[*_\`\[\]<&]/g, function(charString) {
return '&#' + charString.charCodeAt(0) + ';';
});
};
FakeSVG.prototype.toSVG = function() {
var el = SVG(this.tagName, this.attrs);
if(typeof this.children == 'string') {
el.textContent = this.children;
} else {
this.children.forEach(function(e) {
el.appendChild(e.toSVG());
});
}
return el;
};
FakeSVG.prototype.toString = function() {
var str = '<' + this.tagName;
var group = this.tagName == "g" || this.tagName == "svg";
for(var attr in this.attrs) {
str += ' ' + attr + '="' + (this.attrs[attr]+'').replace(/&/g, '&amp;').replace(/"/g, '&quot;') + '"';
}
str += '>';
if(group) str += "\n";
if(typeof this.children == 'string') {
str += FakeSVG.prototype.escapeString(this.children);
} else {
this.children.forEach(function(e) {
str += e;
});
}
str += '</' + this.tagName + '>\n';
return str;
}
FakeSVG.prototype.walk = function(cb) {
cb(this);
}
var Path = funcs.Path = function Path(x,y) {
if(!(this instanceof Path)) return new Path(x,y);
FakeSVG.call(this, 'path');
this.attrs.d = "M"+x+' '+y;
}
subclassOf(Path, FakeSVG);
Path.prototype.m = function(x,y) {
this.attrs.d += 'm'+x+' '+y;
return this;
}
Path.prototype.h = function(val) {
this.attrs.d += 'h'+val;
return this;
}
Path.prototype.right = function(val) { return this.h(Math.max(0, val)); }
Path.prototype.left = function(val) { return this.h(-Math.max(0, val)); }
Path.prototype.v = function(val) {
this.attrs.d += 'v'+val;
return this;
}
Path.prototype.down = function(val) { return this.v(Math.max(0, val)); }
Path.prototype.up = function(val) { return this.v(-Math.max(0, val)); }
Path.prototype.arc = function(sweep){
// 1/4 of a circle
var x = Diagram.ARC_RADIUS;
var y = Diagram.ARC_RADIUS;
if(sweep[0] == 'e' || sweep[1] == 'w') {
x *= -1;
}
if(sweep[0] == 's' || sweep[1] == 'n') {
y *= -1;
}
if(sweep == 'ne' || sweep == 'es' || sweep == 'sw' || sweep == 'wn') {
var cw = 1;
} else {
var cw = 0;
}
this.attrs.d += "a"+Diagram.ARC_RADIUS+" "+Diagram.ARC_RADIUS+" 0 0 "+cw+' '+x+' '+y;
return this;
}
Path.prototype.arc_8 = function(start, dir) {
// 1/8 of a circle
const arc = Diagram.ARC_RADIUS;
const s2 = 1/Math.sqrt(2) * arc;
const s2inv = (arc - s2);
let path = "a " + arc + " " + arc + " 0 0 " + (dir=='cw' ? "1" : "0") + " ";
const sd = start+dir;
const offset =
sd == 'ncw' ? [s2, s2inv] :
sd == 'necw' ? [s2inv, s2] :
sd == 'ecw' ? [-s2inv, s2] :
sd == 'secw' ? [-s2, s2inv] :
sd == 'scw' ? [-s2, -s2inv] :
sd == 'swcw' ? [-s2inv, -s2] :
sd == 'wcw' ? [s2inv, -s2] :
sd == 'nwcw' ? [s2, -s2inv] :
sd == 'nccw' ? [-s2, s2inv] :
sd == 'nwccw' ? [-s2inv, s2] :
sd == 'wccw' ? [s2inv, s2] :
sd == 'swccw' ? [s2, s2inv] :
sd == 'sccw' ? [s2, -s2inv] :
sd == 'seccw' ? [s2inv, -s2] :
sd == 'eccw' ? [-s2inv, -s2] :
sd == 'neccw' ? [-s2, -s2inv] : null
;
path += offset.join(" ");
this.attrs.d += path;
return this;
}
Path.prototype.l = function(x, y) {
this.attrs.d += 'l'+x+' '+y;
return this;
}
Path.prototype.format = function() {
// All paths in this library start/end horizontally.
// The extra .5 ensures a minor overlap, so there are no seams in bad rasterizers.
this.attrs.d += 'h.5';
return this;
}
var DiagramMultiContainer = funcs.DiagramMultiContainer = function DiagramMultiContainer(tagName, items, attrs, text) {
FakeSVG.call(this, tagName, attrs, text);
this.items = items.map(wrapString);
}
subclassOf(DiagramMultiContainer, FakeSVG);
DiagramMultiContainer.prototype.walk = function(cb) {
cb(this);
this.items.forEach(x=>x.walk(cb));
}
var Diagram = funcs.Diagram = function Diagram(items) {
if(!(this instanceof Diagram)) return new Diagram([].slice.call(arguments));
DiagramMultiContainer.call(this, 'svg', items, {class: Diagram.DIAGRAM_CLASS});
if(!(this.items[0] instanceof Start)) {
this.items.unshift(new Start());
}
if(!(this.items[this.items.length-1] instanceof End)) {
this.items.push(new End());
}
this.up = this.down = this.height = this.width = 0;
for(var i = 0; i < this.items.length; i++) {
var item = this.items[i];
this.width += item.width + (item.needsSpace?20:0);
this.up = Math.max(this.up, item.up - this.height);
this.height += item.height;
this.down = Math.max(this.down - item.height, item.down);
}
this.formatted = false;
}
subclassOf(Diagram, DiagramMultiContainer);
for(var option in options) {
Diagram[option] = options[option];
}
Diagram.prototype.format = function(paddingt, paddingr, paddingb, paddingl) {
paddingt = unnull(paddingt, 20);
paddingr = unnull(paddingr, paddingt, 20);
paddingb = unnull(paddingb, paddingt, 20);
paddingl = unnull(paddingl, paddingr, 20);
var x = paddingl;
var y = paddingt;
y += this.up;
var g = FakeSVG('g', Diagram.STROKE_ODD_PIXEL_LENGTH ? {transform:'translate(.5 .5)'} : {});
for(var i = 0; i < this.items.length; i++) {
var item = this.items[i];
if(item.needsSpace) {
Path(x,y).h(10).addTo(g);
x += 10;
}
item.format(x, y, item.width).addTo(g);
x += item.width;
y += item.height;
if(item.needsSpace) {
Path(x,y).h(10).addTo(g);
x += 10;
}
}
this.attrs.width = this.width + paddingl + paddingr;
this.attrs.height = this.up + this.height + this.down + paddingt + paddingb;
this.attrs.viewBox = "0 0 " + this.attrs.width + " " + this.attrs.height;
g.addTo(this);
this.formatted = true;
return this;
}
Diagram.prototype.addTo = function(parent) {
if(!parent) {
var scriptTag = document.getElementsByTagName('script');
scriptTag = scriptTag[scriptTag.length - 1];
parent = scriptTag.parentNode;
}
return this.$super.addTo.call(this, parent);
}
Diagram.prototype.toSVG = function() {
if (!this.formatted) {
this.format();
}
return this.$super.toSVG.call(this);
}
Diagram.prototype.toString = function() {
if (!this.formatted) {
this.format();
}
return this.$super.toString.call(this);
}
Diagram.DEBUG = false;
var ComplexDiagram = funcs.ComplexDiagram = function ComplexDiagram() {
var diagram = new Diagram([].slice.call(arguments));
var items = diagram.items;
items.shift();
items.pop();
items.unshift(new Start({type:"complex"}));
items.push(new End({type:"complex"}));
diagram.items = items;
return diagram;
}
var Sequence = funcs.Sequence = function Sequence(items) {
if(!(this instanceof Sequence)) return new Sequence([].slice.call(arguments));
DiagramMultiContainer.call(this, 'g', items);
var numberOfItems = this.items.length;
this.needsSpace = true;
this.up = this.down = this.height = this.width = 0;
for(var i = 0; i < this.items.length; i++) {
var item = this.items[i];
this.width += item.width + (item.needsSpace?20:0);
this.up = Math.max(this.up, item.up - this.height);
this.height += item.height;
this.down = Math.max(this.down - item.height, item.down);
}
if(this.items[0].needsSpace) this.width -= 10;
if(this.items[this.items.length-1].needsSpace) this.width -= 10;
if(Diagram.DEBUG) {
this.attrs['data-updown'] = this.up + " " + this.height + " " + this.down
this.attrs['data-type'] = "sequence"
}
}
subclassOf(Sequence, DiagramMultiContainer);
Sequence.prototype.format = function(x,y,width) {
// Hook up the two sides if this is narrower than its stated width.
var gaps = determineGaps(width, this.width);
Path(x,y).h(gaps[0]).addTo(this);
Path(x+gaps[0]+this.width,y+this.height).h(gaps[1]).addTo(this);
x += gaps[0];
for(var i = 0; i < this.items.length; i++) {
var item = this.items[i];
if(item.needsSpace && i > 0) {
Path(x,y).h(10).addTo(this);
x += 10;
}
item.format(x, y, item.width).addTo(this);
x += item.width;
y += item.height;
if(item.needsSpace && i < this.items.length-1) {
Path(x,y).h(10).addTo(this);
x += 10;
}
}
return this;
}
var Stack = funcs.Stack = function Stack(items) {
if(!(this instanceof Stack)) return new Stack([].slice.call(arguments));
DiagramMultiContainer.call(this, 'g', items);
if( items.length === 0 ) {
throw new RangeError("Stack() must have at least one child.");
}
this.width = Math.max.apply(null, this.items.map(function(e) { return e.width + (e.needsSpace?20:0); }));
//if(this.items[0].needsSpace) this.width -= 10;
//if(this.items[this.items.length-1].needsSpace) this.width -= 10;
if(this.items.length > 1){
this.width += Diagram.ARC_RADIUS*2;
}
this.needsSpace = true;
this.up = this.items[0].up;
this.down = this.items[this.items.length-1].down;
this.height = 0;
var last = this.items.length - 1;
for(var i = 0; i < this.items.length; i++) {
var item = this.items[i];
this.height += item.height;
if(i > 0) {
this.height += Math.max(Diagram.ARC_RADIUS*2, item.up + Diagram.VERTICAL_SEPARATION);
}
if(i < last) {
this.height += Math.max(Diagram.ARC_RADIUS*2, item.down + Diagram.VERTICAL_SEPARATION);
}
}
if(Diagram.DEBUG) {
this.attrs['data-updown'] = this.up + " " + this.height + " " + this.down
this.attrs['data-type'] = "stack"
}
}
subclassOf(Stack, DiagramMultiContainer);
Stack.prototype.format = function(x,y,width) {
var gaps = determineGaps(width, this.width);
Path(x,y).h(gaps[0]).addTo(this);
x += gaps[0];
var xInitial = x;
if(this.items.length > 1) {
Path(x, y).h(Diagram.ARC_RADIUS).addTo(this);
x += Diagram.ARC_RADIUS;
}
for(var i = 0; i < this.items.length; i++) {
var item = this.items[i];
var innerWidth = this.width - (this.items.length>1 ? Diagram.ARC_RADIUS*2 : 0);
item.format(x, y, innerWidth).addTo(this);
x += innerWidth;
y += item.height;
if(i !== this.items.length-1) {
Path(x, y)
.arc('ne').down(Math.max(0, item.down + Diagram.VERTICAL_SEPARATION - Diagram.ARC_RADIUS*2))
.arc('es').left(innerWidth)
.arc('nw').down(Math.max(0, this.items[i+1].up + Diagram.VERTICAL_SEPARATION - Diagram.ARC_RADIUS*2))
.arc('ws').addTo(this);
y += Math.max(item.down + Diagram.VERTICAL_SEPARATION, Diagram.ARC_RADIUS*2) + Math.max(this.items[i+1].up + Diagram.VERTICAL_SEPARATION, Diagram.ARC_RADIUS*2);
//y += Math.max(Diagram.ARC_RADIUS*4, item.down + Diagram.VERTICAL_SEPARATION*2 + this.items[i+1].up)
x = xInitial+Diagram.ARC_RADIUS;
}
}
if(this.items.length > 1) {
Path(x,y).h(Diagram.ARC_RADIUS).addTo(this);
x += Diagram.ARC_RADIUS;
}
Path(x,y).h(gaps[1]).addTo(this);
return this;
}
var OptionalSequence = funcs.OptionalSequence = function OptionalSequence(items) {
if(!(this instanceof OptionalSequence)) return new OptionalSequence([].slice.call(arguments));
DiagramMultiContainer.call(this, 'g', items);
if( items.length === 0 ) {
throw new RangeError("OptionalSequence() must have at least one child.");
}
if( items.length === 1 ) {
return new Sequence(items);
}
var arc = Diagram.ARC_RADIUS;
this.needsSpace = false;
this.width = 0;
this.up = 0;
this.height = sum(this.items, function(x){return x.height});
this.down = this.items[0].down;
var heightSoFar = 0;
for(var i = 0; i < this.items.length; i++) {
var item = this.items[i];
this.up = Math.max(this.up, Math.max(arc*2, item.up + Diagram.VERTICAL_SEPARATION) - heightSoFar);
heightSoFar += item.height;
if(i > 0) {
this.down = Math.max(this.height + this.down, heightSoFar + Math.max(arc*2, item.down + Diagram.VERTICAL_SEPARATION)) - this.height;
}
var itemWidth = (item.needsSpace?10:0) + item.width;
if(i == 0) {
this.width += arc + Math.max(itemWidth, arc);
} else {
this.width += arc*2 + Math.max(itemWidth, arc) + arc;
}
}
if(Diagram.DEBUG) {
this.attrs['data-updown'] = this.up + " " + this.height + " " + this.down
this.attrs['data-type'] = "optseq"
}
}
subclassOf(OptionalSequence, DiagramMultiContainer);
OptionalSequence.prototype.format = function(x, y, width) {
var arc = Diagram.ARC_RADIUS;
var gaps = determineGaps(width, this.width);
Path(x, y).right(gaps[0]).addTo(this);
Path(x + gaps[0] + this.width, y + this.height).right(gaps[1]).addTo(this);
x += gaps[0]
var upperLineY = y - this.up;
var last = this.items.length - 1;
for(var i = 0; i < this.items.length; i++) {
var item = this.items[i];
var itemSpace = (item.needsSpace?10:0);
var itemWidth = item.width + itemSpace;
if(i == 0) {
// Upper skip
Path(x,y)
.arc('se')
.up(y - upperLineY - arc*2)
.arc('wn')
.right(itemWidth - arc)
.arc('ne')
.down(y + item.height - upperLineY - arc*2)
.arc('ws')
.addTo(this);
// Straight line
Path(x, y)
.right(itemSpace + arc)
.addTo(this);
item.format(x + itemSpace + arc, y, item.width).addTo(this);
x += itemWidth + arc;
y += item.height;
// x ends on the far side of the first element,
// where the next element's skip needs to begin
} else if(i < last) {
// Upper skip
Path(x, upperLineY)
.right(arc*2 + Math.max(itemWidth, arc) + arc)
.arc('ne')
.down(y - upperLineY + item.height - arc*2)
.arc('ws')
.addTo(this);
// Straight line
Path(x,y)
.right(arc*2)
.addTo(this);
item.format(x + arc*2, y, item.width).addTo(this);
Path(x + item.width + arc*2, y + item.height)
.right(itemSpace + arc)
.addTo(this);
// Lower skip
Path(x,y)
.arc('ne')
.down(item.height + Math.max(item.down + Diagram.VERTICAL_SEPARATION, arc*2) - arc*2)
.arc('ws')
.right(itemWidth - arc)
.arc('se')
.up(item.down + Diagram.VERTICAL_SEPARATION - arc*2)
.arc('wn')
.addTo(this);
x += arc*2 + Math.max(itemWidth, arc) + arc;
y += item.height;
} else {
// Straight line
Path(x, y)
.right(arc*2)
.addTo(this);
item.format(x + arc*2, y, item.width).addTo(this);
Path(x + arc*2 + item.width, y + item.height)
.right(itemSpace + arc)
.addTo(this);
// Lower skip
Path(x,y)
.arc('ne')
.down(item.height + Math.max(item.down + Diagram.VERTICAL_SEPARATION, arc*2) - arc*2)
.arc('ws')
.right(itemWidth - arc)
.arc('se')
.up(item.down + Diagram.VERTICAL_SEPARATION - arc*2)
.arc('wn')
.addTo(this);
}
}
return this;
}
var AlternatingSequence = funcs.AlternatingSequence = function AlternatingSequence(items) {
if(!(this instanceof AlternatingSequence)) return new AlternatingSequence([].slice.call(arguments));
DiagramMultiContainer.call(this, 'g', items);
if( items.length === 1 ) {
return new Sequence(items);
}
if( items.length !== 2 ) {
throw new RangeError("AlternatingSequence() must have one or two children.");
}
this.needsSpace = false;
const arc = Diagram.ARC_RADIUS;
const vert = Diagram.VERTICAL_SEPARATION;
const max = Math.max;
const first = this.items[0];
const second = this.items[1];
const arcX = 1 / Math.sqrt(2) * arc * 2;
const arcY = (1 - 1 / Math.sqrt(2)) * arc * 2;
const crossY = Math.max(arc, Diagram.VERTICAL_SEPARATION);
const crossX = (crossY - arcY) + arcX;
const firstOut = max(arc + arc, crossY/2 + arc + arc, crossY/2 + vert + first.down);
this.up = firstOut + first.height + first.up;
const secondIn = max(arc + arc, crossY/2 + arc + arc, crossY/2 + vert + second.up);
this.down = secondIn + second.height + second.down;
this.height = 0;
const firstWidth = 2*(first.needsSpace?10:0) + first.width;
const secondWidth = 2*(second.needsSpace?10:0) + second.width;
this.width = 2*arc + max(firstWidth, crossX, secondWidth) + 2*arc;
if(Diagram.DEBUG) {
this.attrs['data-updown'] = this.up + " " + this.height + " " + this.down
this.attrs['data-type'] = "altseq"
}
}
subclassOf(AlternatingSequence, DiagramMultiContainer);
AlternatingSequence.prototype.format = function(x, y, width) {
const arc = Diagram.ARC_RADIUS;
const gaps = determineGaps(width, this.width);
Path(x,y).right(gaps[0]).addTo(this);
x += gaps[0];
Path(x+this.width, y).right(gaps[1]).addTo(this);
// bounding box
//Path(x+gaps[0], y).up(this.up).right(this.width).down(this.up+this.down).left(this.width).up(this.down).addTo(this);
const first = this.items[0];
const second = this.items[1];
// top
const firstIn = this.up - first.up;
const firstOut = this.up - first.up - first.height;
Path(x,y).arc('se').up(firstIn-2*arc).arc('wn').addTo(this);
first.format(x + 2*arc, y - firstIn, this.width - 4*arc).addTo(this);
Path(x + this.width - 2*arc, y - firstOut).arc('ne').down(firstOut - 2*arc).arc('ws').addTo(this);
// bottom
const secondIn = this.down - second.down - second.height;
const secondOut = this.down - second.down;
Path(x,y).arc('ne').down(secondIn - 2*arc).arc('ws').addTo(this);
second.format(x + 2*arc, y + secondIn, this.width - 4*arc).addTo(this);
Path(x + this.width - 2*arc, y + secondOut).arc('se').up(secondOut - 2*arc).arc('wn').addTo(this);
// crossover
const arcX = 1 / Math.sqrt(2) * arc * 2;
const arcY = (1 - 1 / Math.sqrt(2)) * arc * 2;
const crossY = Math.max(arc, Diagram.VERTICAL_SEPARATION);
const crossX = (crossY - arcY) + arcX;
const crossBar = (this.width - 4*arc - crossX)/2;
Path(x+arc, y - crossY/2 - arc).arc('ws').right(crossBar)
.arc_8('n', 'cw').l(crossX - arcX, crossY - arcY).arc_8('sw', 'ccw')
.right(crossBar).arc('ne').addTo(this);
Path(x+arc, y + crossY/2 + arc).arc('wn').right(crossBar)
.arc_8('s', 'ccw').l(crossX - arcX, -(crossY - arcY)).arc_8('nw', 'cw')
.right(crossBar).arc('se').addTo(this);
return this;
}
var Choice = funcs.Choice = function Choice(normal, items) {
if(!(this instanceof Choice)) return new Choice(normal, [].slice.call(arguments,1));
DiagramMultiContainer.call(this, 'g', items);
if( typeof normal !== "number" || normal !== Math.floor(normal) ) {
throw new TypeError("The first argument of Choice() must be an integer.");
} else if(normal < 0 || normal >= items.length) {
throw new RangeError("The first argument of Choice() must be an index for one of the items.");
} else {
this.normal = normal;
}
var first = 0;
var last = items.length - 1;
this.width = Math.max.apply(null, this.items.map(function(el){return el.width})) + Diagram.ARC_RADIUS*4;
this.height = this.items[normal].height;
this.up = this.items[first].up;
for(var i = first; i < normal; i++) {
if(i == normal-1) var arcs = Diagram.ARC_RADIUS*2;
else var arcs = Diagram.ARC_RADIUS;
this.up += Math.max(arcs, this.items[i].height + this.items[i].down + Diagram.VERTICAL_SEPARATION + this.items[i+1].up);
}
this.down = this.items[last].down;
for(var i = normal+1; i <= last; i++) {
if(i == normal+1) var arcs = Diagram.ARC_RADIUS*2;
else var arcs = Diagram.ARC_RADIUS;
this.down += Math.max(arcs, this.items[i-1].height + this.items[i-1].down + Diagram.VERTICAL_SEPARATION + this.items[i].up);
}
this.down -= this.items[normal].height; // already counted in Choice.height
if(Diagram.DEBUG) {
this.attrs['data-updown'] = this.up + " " + this.height + " " + this.down
this.attrs['data-type'] = "choice"
}
}
subclassOf(Choice, DiagramMultiContainer);
Choice.prototype.format = function(x,y,width) {
// Hook up the two sides if this is narrower than its stated width.
var gaps = determineGaps(width, this.width);
Path(x,y).h(gaps[0]).addTo(this);
Path(x+gaps[0]+this.width,y+this.height).h(gaps[1]).addTo(this);
x += gaps[0];
var last = this.items.length -1;
var innerWidth = this.width - Diagram.ARC_RADIUS*4;
// Do the elements that curve above
for(var i = this.normal - 1; i >= 0; i--) {
var item = this.items[i];
if( i == this.normal - 1 ) {
var distanceFromY = Math.max(Diagram.ARC_RADIUS*2, this.items[this.normal].up + Diagram.VERTICAL_SEPARATION + item.down + item.height);
}
Path(x,y)
.arc('se')
.up(distanceFromY - Diagram.ARC_RADIUS*2)
.arc('wn').addTo(this);
item.format(x+Diagram.ARC_RADIUS*2,y - distanceFromY,innerWidth).addTo(this);
Path(x+Diagram.ARC_RADIUS*2+innerWidth, y-distanceFromY+item.height)
.arc('ne')
.down(distanceFromY - item.height + this.height - Diagram.ARC_RADIUS*2)
.arc('ws').addTo(this);
distanceFromY += Math.max(Diagram.ARC_RADIUS, item.up + Diagram.VERTICAL_SEPARATION + (i == 0 ? 0 : this.items[i-1].down+this.items[i-1].height));
}
// Do the straight-line path.
Path(x,y).right(Diagram.ARC_RADIUS*2).addTo(this);
this.items[this.normal].format(x+Diagram.ARC_RADIUS*2, y, innerWidth).addTo(this);
Path(x+Diagram.ARC_RADIUS*2+innerWidth, y+this.height).right(Diagram.ARC_RADIUS*2).addTo(this);
// Do the elements that curve below
for(var i = this.normal+1; i <= last; i++) {
var item = this.items[i];
if( i == this.normal + 1 ) {
var distanceFromY = Math.max(Diagram.ARC_RADIUS*2, this.height + this.items[this.normal].down + Diagram.VERTICAL_SEPARATION + item.up);
}
Path(x,y)
.arc('ne')
.down(distanceFromY - Diagram.ARC_RADIUS*2)
.arc('ws').addTo(this);
item.format(x+Diagram.ARC_RADIUS*2, y+distanceFromY, innerWidth).addTo(this);
Path(x+Diagram.ARC_RADIUS*2+innerWidth, y+distanceFromY+item.height)
.arc('se')
.up(distanceFromY - Diagram.ARC_RADIUS*2 + item.height - this.height)
.arc('wn').addTo(this);
distanceFromY += Math.max(Diagram.ARC_RADIUS, item.height + item.down + Diagram.VERTICAL_SEPARATION + (i == last ? 0 : this.items[i+1].up));
}
return this;
}
var HorizontalChoice = funcs.HorizontalChoice = function HorizontalChoice(items) {
if(!(this instanceof HorizontalChoice)) return new HorizontalChoice([].slice.call(arguments));
if( items.length === 0 ) {
throw new RangeError("HorizontalChoice() must have at least one child.");
}
if( items.length === 1) {
return new Sequence(items);
}
DiagramMultiContainer.call(this, 'g', items);
const allButLast = this.items.slice(0, -1);
const middles = this.items.slice(1, -1);
const first = this.items[0];
const last = this.items[this.items.length - 1];
this.needsSpace = false;
this.width = Diagram.ARC_RADIUS; // starting track
this.width += Diagram.ARC_RADIUS*2 * (this.items.length-1); // inbetween tracks
this.width += sum(this.items, x=>x.width + (x.needsSpace?20:0)); // items
this.width += (last.height > 0 ? Diagram.ARC_RADIUS : 0); // needs space to curve up
this.width += Diagram.ARC_RADIUS; //ending track
// Always exits at entrance height
this.height = 0;
// All but the last have a track running above them
this._upperTrack = Math.max(
Diagram.ARC_RADIUS*2,
Diagram.VERTICAL_SEPARATION,
max(allButLast, x=>x.up) + Diagram.VERTICAL_SEPARATION
);
this.up = Math.max(this._upperTrack, last.up);
// All but the first have a track running below them
// Last either straight-lines or curves up, so has different calculation
this._lowerTrack = Math.max(
Diagram.VERTICAL_SEPARATION,
max(middles, x=>x.height+Math.max(x.down+Diagram.VERTICAL_SEPARATION, Diagram.ARC_RADIUS*2)),
last.height + last.down + Diagram.VERTICAL_SEPARATION
);
if(first.height < this._lowerTrack) {
// Make sure there's at least 2*AR room between first exit and lower track
this._lowerTrack = Math.max(this._lowerTrack, first.height + Diagram.ARC_RADIUS*2);
}
this.down = Math.max(this._lowerTrack, first.height + first.down);
if(Diagram.DEBUG) {
this.attrs['data-updown'] = this.up + " " + this.height + " " + this.down
this.attrs['data-type'] = "horizontalchoice"
}
}
subclassOf(HorizontalChoice, DiagramMultiContainer);
HorizontalChoice.prototype.format = function(x,y,width) {
// Hook up the two sides if this is narrower than its stated width.
var gaps = determineGaps(width, this.width);
new Path(x,y).h(gaps[0]).addTo(this);
new Path(x+gaps[0]+this.width,y+this.height).h(gaps[1]).addTo(this);
x += gaps[0];
const first = this.items[0];
const last = this.items[this.items.length-1];
const allButFirst = this.items.slice(1);
const allButLast = this.items.slice(0, -1);
// upper track
var upperSpan = (sum(allButLast, x=>x.width+(x.needsSpace?20:0))
+ (this.items.length - 2) * Diagram.ARC_RADIUS*2
- Diagram.ARC_RADIUS
);
new Path(x,y)
.arc('se')
.v(-(this._upperTrack - Diagram.ARC_RADIUS*2))
.arc('wn')
.h(upperSpan)
.addTo(this);
// lower track
var lowerSpan = (sum(allButFirst, x=>x.width+(x.needsSpace?20:0))
+ (this.items.length - 2) * Diagram.ARC_RADIUS*2
+ (last.height > 0 ? Diagram.ARC_RADIUS : 0)
- Diagram.ARC_RADIUS
);
var lowerStart = x + Diagram.ARC_RADIUS + first.width+(first.needsSpace?20:0) + Diagram.ARC_RADIUS*2;
new Path(lowerStart, y+this._lowerTrack)
.h(lowerSpan)
.arc('se')
.v(-(this._lowerTrack - Diagram.ARC_RADIUS*2))
.arc('wn')
.addTo(this);
// Items
for(const [i, item] of enumerate(this.items)) {
// input track
if(i === 0) {
new Path(x,y)
.h(Diagram.ARC_RADIUS)
.addTo(this);
x += Diagram.ARC_RADIUS;
} else {
new Path(x, y - this._upperTrack)
.arc('ne')
.v(this._upperTrack - Diagram.ARC_RADIUS*2)
.arc('ws')
.addTo(this);
x += Diagram.ARC_RADIUS*2;
}
// item
var itemWidth = item.width + (item.needsSpace?20:0);
item.format(x, y, itemWidth).addTo(this);
x += itemWidth;
// output track
if(i === this.items.length-1) {
if(item.height === 0) {
new Path(x,y)
.h(Diagram.ARC_RADIUS)
.addTo(this);
} else {
new Path(x,y+item.height)
.arc('se')
.addTo(this);
}
} else if(i === 0 && item.height > this._lowerTrack) {
// Needs to arc up to meet the lower track, not down.
if(item.height - this._lowerTrack >= Diagram.ARC_RADIUS*2) {
new Path(x, y+item.height)
.arc('se')
.v(this._lowerTrack - item.height + Diagram.ARC_RADIUS*2)
.arc('wn')
.addTo(this);
} else {
// Not enough space to fit two arcs
// so just bail and draw a straight line for now.
new Path(x, y+item.height)
.l(Diagram.ARC_RADIUS*2, this._lowerTrack - item.height)
.addTo(this);
}
} else {
new Path(x, y+item.height)
.arc('ne')
.v(this._lowerTrack - item.height - Diagram.ARC_RADIUS*2)
.arc('ws')
.addTo(this);
}
}
return this;
}
var MultipleChoice = funcs.MultipleChoice = function MultipleChoice(normal, type, items) {
if(!(this instanceof MultipleChoice)) return new MultipleChoice(normal, type, [].slice.call(arguments,2));
DiagramMultiContainer.call(this, 'g', items);
if( typeof normal !== "number" || normal !== Math.floor(normal) ) {
throw new TypeError("The first argument of MultipleChoice() must be an integer.");
} else if(normal < 0 || normal >= items.length) {
throw new RangeError("The first argument of MultipleChoice() must be an index for one of the items.");
} else {
this.normal = normal;
}
if( type != "any" && type != "all" ) {
throw new SyntaxError("The second argument of MultipleChoice must be 'any' or 'all'.");
} else {
this.type = type;
}
this.needsSpace = true;
this.innerWidth = max(this.items, function(x){return x.width});
this.width = 30 + Diagram.ARC_RADIUS + this.innerWidth + Diagram.ARC_RADIUS + 20;
this.up = this.items[0].up;
this.down = this.items[this.items.length-1].down;
this.height = this.items[normal].height;
for(var i = 0; i < this.items.length; i++) {
var item = this.items[i];
if(i == normal - 1 || i == normal + 1) var minimum = 10 + Diagram.ARC_RADIUS;
else var minimum = Diagram.ARC_RADIUS;
if(i < normal) {
this.up += Math.max(minimum, item.height + item.down + Diagram.VERTICAL_SEPARATION + this.items[i+1].up);
} else if(i > normal) {
this.down += Math.max(minimum, item.up + Diagram.VERTICAL_SEPARATION + this.items[i-1].down + this.items[i-1].height);
}
}
this.down -= this.items[normal].height; // already counted in this.height
if(Diagram.DEBUG) {
this.attrs['data-updown'] = this.up + " " + this.height + " " + this.down
this.attrs['data-type'] = "multiplechoice"
}
}
subclassOf(MultipleChoice, DiagramMultiContainer);
MultipleChoice.prototype.format = function(x, y, width) {
var gaps = determineGaps(width, this.width);
Path(x, y).right(gaps[0]).addTo(this);
Path(x + gaps[0] + this.width, y + this.height).right(gaps[1]).addTo(this);
x += gaps[0];
var normal = this.items[this.normal];
// Do the elements that curve above
for(var i = this.normal - 1; i >= 0; i--) {
var item = this.items[i];
if( i == this.normal - 1 ) {
var distanceFromY = Math.max(10 + Diagram.ARC_RADIUS, normal.up + Diagram.VERTICAL_SEPARATION + item.down + item.height);
}
Path(x + 30,y)
.up(distanceFromY - Diagram.ARC_RADIUS)
.arc('wn').addTo(this);
item.format(x + 30 + Diagram.ARC_RADIUS, y - distanceFromY, this.innerWidth).addTo(this);
Path(x + 30 + Diagram.ARC_RADIUS + this.innerWidth, y - distanceFromY + item.height)
.arc('ne')
.down(distanceFromY - item.height + this.height - Diagram.ARC_RADIUS - 10)
.addTo(this);
if(i != 0) {
distanceFromY += Math.max(Diagram.ARC_RADIUS, item.up + Diagram.VERTICAL_SEPARATION + this.items[i-1].down + this.items[i-1].height);
}
}
Path(x + 30, y).right(Diagram.ARC_RADIUS).addTo(this);
normal.format(x + 30 + Diagram.ARC_RADIUS, y, this.innerWidth).addTo(this);
Path(x + 30 + Diagram.ARC_RADIUS + this.innerWidth, y + this.height).right(Diagram.ARC_RADIUS).addTo(this);
for(var i = this.normal+1; i < this.items.length; i++) {
var item = this.items[i];
if(i == this.normal + 1) {
var distanceFromY = Math.max(10+Diagram.ARC_RADIUS, normal.height + normal.down + Diagram.VERTICAL_SEPARATION + item.up);
}
Path(x + 30, y)
.down(distanceFromY - Diagram.ARC_RADIUS)
.arc('ws')
.addTo(this);
item.format(x + 30 + Diagram.ARC_RADIUS, y + distanceFromY, this.innerWidth).addTo(this)
Path(x + 30 + Diagram.ARC_RADIUS + this.innerWidth, y + distanceFromY + item.height)
.arc('se')
.up(distanceFromY - Diagram.ARC_RADIUS + item.height - normal.height)
.addTo(this);
if(i != this.items.length - 1) {
distanceFromY += Math.max(Diagram.ARC_RADIUS, item.height + item.down + Diagram.VERTICAL_SEPARATION + this.items[i+1].up);
}
}
var text = FakeSVG('g', {"class": "diagram-text"}).addTo(this)
FakeSVG('title', {}, (this.type=="any"?"take one or more branches, once each, in any order":"take all branches, once each, in any order")).addTo(text)
FakeSVG('path', {
"d": "M "+(x+30)+" "+(y-10)+" h -26 a 4 4 0 0 0 -4 4 v 12 a 4 4 0 0 0 4 4 h 26 z",
"class": "diagram-text"
}).addTo(text)
FakeSVG('text', {
"x": x + 15,
"y": y + 4,
"class": "diagram-text"
}, (this.type=="any"?"1+":"all")).addTo(text)
FakeSVG('path', {
"d": "M "+(x+this.width-20)+" "+(y-10)+" h 16 a 4 4 0 0 1 4 4 v 12 a 4 4 0 0 1 -4 4 h -16 z",
"class": "diagram-text"
}).addTo(text)
FakeSVG('path', {
"d": "M "+(x+this.width-13)+" "+(y-2)+" a 4 4 0 1 0 6 -1 m 2.75 -1 h -4 v 4 m 0 -3 h 2",
"style": "stroke-width: 1.75"
}).addTo(text)
return this;
};
var Optional = funcs.Optional = function Optional(item, skip) {
if( skip === undefined )
return Choice(1, Skip(), item);
else if ( skip === "skip" )
return Choice(0, Skip(), item);
else
throw "Unknown value for Optional()'s 'skip' argument.";
}
var OneOrMore = funcs.OneOrMore = function OneOrMore(item, rep) {
if(!(this instanceof OneOrMore)) return new OneOrMore(item, rep);
FakeSVG.call(this, 'g');
rep = rep || (new Skip);
this.item = wrapString(item);
this.rep = wrapString(rep);
this.width = Math.max(this.item.width, this.rep.width) + Diagram.ARC_RADIUS*2;
this.height = this.item.height;
this.up = this.item.up;
this.down = Math.max(Diagram.ARC_RADIUS*2, this.item.down + Diagram.VERTICAL_SEPARATION + this.rep.up + this.rep.height + this.rep.down);
if(Diagram.DEBUG) {
this.attrs['data-updown'] = this.up + " " + this.height + " " + this.down
this.attrs['data-type'] = "oneormore"
}
}
subclassOf(OneOrMore, FakeSVG);
OneOrMore.prototype.needsSpace = true;
OneOrMore.prototype.format = function(x,y,width) {
// Hook up the two sides if this is narrower than its stated width.
var gaps = determineGaps(width, this.width);
Path(x,y).h(gaps[0]).addTo(this);
Path(x+gaps[0]+this.width,y+this.height).h(gaps[1]).addTo(this);
x += gaps[0];
// Draw item
Path(x,y).right(Diagram.ARC_RADIUS).addTo(this);
this.item.format(x+Diagram.ARC_RADIUS,y,this.width-Diagram.ARC_RADIUS*2).addTo(this);
Path(x+this.width-Diagram.ARC_RADIUS,y+this.height).right(Diagram.ARC_RADIUS).addTo(this);
// Draw repeat arc
var distanceFromY = Math.max(Diagram.ARC_RADIUS*2, this.item.height+this.item.down+Diagram.VERTICAL_SEPARATION+this.rep.up);
Path(x+Diagram.ARC_RADIUS,y).arc('nw').down(distanceFromY-Diagram.ARC_RADIUS*2).arc('ws').addTo(this);
this.rep.format(x+Diagram.ARC_RADIUS, y+distanceFromY, this.width - Diagram.ARC_RADIUS*2).addTo(this);
Path(x+this.width-Diagram.ARC_RADIUS, y+distanceFromY+this.rep.height).arc('se').up(distanceFromY-Diagram.ARC_RADIUS*2+this.rep.height-this.item.height).arc('en').addTo(this);
return this;
}
OneOrMore.prototype.walk = function(cb) {
cb(this);
this.item.walk(cb);
this.rep.walk(cb);
}
var ZeroOrMore = funcs.ZeroOrMore = function ZeroOrMore(item, rep, skip) {
return Optional(OneOrMore(item, rep), skip);
}
var Start = funcs.Start = function Start({type="simple", label}={}) {
if(!(this instanceof Start)) return new Start({type, label});
FakeSVG.call(this, 'g');
this.width = 20;
this.height = 0;
this.up = 10;
this.down = 10;
this.type = type;
if(label != undefined) {
this.label = ""+label;
this.width = Math.max(20, this.label.length * Diagram.CHAR_WIDTH + 10);
}
if(Diagram.DEBUG) {
this.attrs['data-updown'] = this.up + " " + this.height + " " + this.down
this.attrs['data-type'] = "start"
}
}
subclassOf(Start, FakeSVG);
Start.prototype.format = function(x,y) {
let path = new Path(x, y-10);
if (this.type === "complex") {
path.down(20)
.m(0, -10)
.right(this.width)
.addTo(this);
} else {
path.down(20)
.m(10, -20)
.down(20)
.m(-10, -10)
.right(this.width)
.addTo(this);
}
if(this.label) {
new FakeSVG('text', {x:x, y:y-15, style:"text-anchor:start"}, this.label).addTo(this);
}
return this;
}
var End = funcs.End = function End({type="simple"}={}) {
if(!(this instanceof End)) return new End({type});
FakeSVG.call(this, 'path');
this.width = 20;
this.height = 0;
this.up = 10;
this.down = 10;
this.type = type;
if(Diagram.DEBUG) {
this.attrs['data-updown'] = this.up + " " + this.height + " " + this.down
this.attrs['data-type'] = "end"
}
}
subclassOf(End, FakeSVG);
End.prototype.format = function(x,y) {
if (this.type === "complex") {
this.attrs.d = 'M '+x+' '+y+' h 20 m 0 -10 v 20';
} else {
this.attrs.d = 'M '+x+' '+y+' h 20 m -10 -10 v 20 m 10 -20 v 20';
}
return this;
}
var Terminal = funcs.Terminal = function Terminal(text, {href, title}={}) {
if(!(this instanceof Terminal)) return new Terminal(text, {href, title});
FakeSVG.call(this, 'g', {'class': 'terminal'});
this.text = ""+text;
this.href = href;
this.title = title;
this.width = this.text.length * Diagram.CHAR_WIDTH + 20;
this.height = 0;
this.up = 11;
this.down = 11;
if(Diagram.DEBUG) {
this.attrs['data-updown'] = this.up + " " + this.height + " " + this.down
this.attrs['data-type'] = "terminal"
}
}
subclassOf(Terminal, FakeSVG);
Terminal.prototype.needsSpace = true;
Terminal.prototype.format = function(x, y, width) {
// Hook up the two sides if this is narrower than its stated width.
var gaps = determineGaps(width, this.width);
Path(x,y).h(gaps[0]).addTo(this);
Path(x+gaps[0]+this.width,y).h(gaps[1]).addTo(this);
x += gaps[0];
FakeSVG('rect', {x:x, y:y-11, width:this.width, height:this.up+this.down, rx:10, ry:10}).addTo(this);
var text = FakeSVG('text', {x:x+this.width/2, y:y+4}, this.text);
if(this.href)
FakeSVG('a', {'xlink:href': this.href}, [text]).addTo(this);
else
text.addTo(this);
if(this.title)
new FakeSVG('title', {}, this.title).addTo(this);
return this;
}
var NonTerminal = funcs.NonTerminal = function NonTerminal(text, {href, title}={}) {
if(!(this instanceof NonTerminal)) return new NonTerminal(text, {href, title});
FakeSVG.call(this, 'g', {'class': 'non-terminal'});
this.text = ""+text;
this.href = href;
this.title = title;
this.width = this.text.length * Diagram.CHAR_WIDTH + 20;
this.height = 0;
this.up = 11;
this.down = 11;
if(Diagram.DEBUG) {
this.attrs['data-updown'] = this.up + " " + this.height + " " + this.down
this.attrs['data-type'] = "nonterminal"
}
}
subclassOf(NonTerminal, FakeSVG);
NonTerminal.prototype.needsSpace = true;
NonTerminal.prototype.format = function(x, y, width) {
// Hook up the two sides if this is narrower than its stated width.
var gaps = determineGaps(width, this.width);
Path(x,y).h(gaps[0]).addTo(this);
Path(x+gaps[0]+this.width,y).h(gaps[1]).addTo(this);
x += gaps[0];
FakeSVG('rect', {x:x, y:y-11, width:this.width, height:this.up+this.down}).addTo(this);
var text = FakeSVG('text', {x:x+this.width/2, y:y+4}, this.text);
if(this.href)
FakeSVG('a', {'xlink:href': this.href}, [text]).addTo(this);
else
text.addTo(this);
if(this.title)
new FakeSVG('title', {}, this.title).addTo(this);
return this;
}
var Comment = funcs.Comment = function Comment(text, {href, title}={}) {
if(!(this instanceof Comment)) return new Comment(text, {href, title});
FakeSVG.call(this, 'g');
this.text = ""+text;
this.href = href;
this.title = title;
this.width = this.text.length * Diagram.COMMENT_CHAR_WIDTH + 10;
this.height = 0;
this.up = 11;
this.down = 11;
if(Diagram.DEBUG) {
this.attrs['data-updown'] = this.up + " " + this.height + " " + this.down
this.attrs['data-type'] = "comment"
}
}
subclassOf(Comment, FakeSVG);
Comment.prototype.needsSpace = true;
Comment.prototype.format = function(x, y, width) {
// Hook up the two sides if this is narrower than its stated width.
var gaps = determineGaps(width, this.width);
Path(x,y).h(gaps[0]).addTo(this);
Path(x+gaps[0]+this.width,y+this.height).h(gaps[1]).addTo(this);
x += gaps[0];
var text = FakeSVG('text', {x:x+this.width/2, y:y+5, class:'comment'}, this.text);
if(this.href)
FakeSVG('a', {'xlink:href': this.href}, [text]).addTo(this);
else
text.addTo(this);
if(this.title)
new FakeSVG('title', {}, this.title).addTo(this);
return this;
}
var Skip = funcs.Skip = function Skip() {
if(!(this instanceof Skip)) return new Skip();
FakeSVG.call(this, 'g');
this.width = 0;
this.height = 0;
this.up = 0;
this.down = 0;
if(Diagram.DEBUG) {
this.attrs['data-updown'] = this.up + " " + this.height + " " + this.down
this.attrs['data-type'] = "skip"
}
}
subclassOf(Skip, FakeSVG);
Skip.prototype.format = function(x, y, width) {
Path(x,y).right(width).addTo(this);
return this;
}
var Block = funcs.Block = function Block({width=50, up=15, height=25, down=15, needsSpace=true}={}) {
if(!(this instanceof Block)) return new Block({width, up, height, down, needsSpace});
FakeSVG.call(this, 'g');
this.width = width;
this.height = height;
this.up = up;
this.down = down;
this.needsSpace = true;
if(Diagram.DEBUG) {
this.attrs['data-updown'] = this.up + " " + this.height + " " + this.down;
this.attrs['data-type'] = "block"
}
}
subclassOf(Block, FakeSVG);
Block.prototype.format = function(x, y, width) {
// Hook up the two sides if this is narrower than its stated width.
var gaps = determineGaps(width, this.width);
new Path(x,y).h(gaps[0]).addTo(this);
new Path(x+gaps[0]+this.width,y).h(gaps[1]).addTo(this);
x += gaps[0];
new FakeSVG('rect', {x:x, y:y-this.up, width:this.width, height:this.up+this.height+this.down}).addTo(this);
return this;
}
var root;
if (typeof define === 'function' && define.amd) {
// AMD. Register as an anonymous module.
root = {};
define([], function() {
return root;
});
} else if (typeof exports === 'object') {
// CommonJS for node
root = exports;
} else {
// Browser globals (root is window)
root = this;
}
for(var name in funcs) {
root[name] = funcs[name];
}
}).call(this,
{
VERTICAL_SEPARATION: 8,
ARC_RADIUS: 10,
DIAGRAM_CLASS: 'railroad-diagram',
STROKE_ODD_PIXEL_LENGTH: true,
INTERNAL_ALIGNMENT: 'center',
CHAR_WIDTH: 8.5, // width of each monospace character. play until you find the right value for your font
COMMENT_CHAR_WIDTH: 7, // comments are in smaller text by default
}
);
golang-github-alecthomas-participle-v2-2.1.4/cmd/railroad/main.go 0000664 0000000 0000000 00000012313 15053003664 0024657 0 ustar 00root root 0000000 0000000 // Package main generates Railroad Diagrams from Participle grammar EBNF.
package main
import (
"embed"
"flag"
"fmt"
"os"
"github.com/alecthomas/repr"
"github.com/alecthomas/participle/v2/ebnf"
)
const (
mergeRefThreshold = -1
mergeSizeThreshold = 0
)
type production struct {
*ebnf.Production
refs int
size int
}
// Embed the railroad-diagrams css and js files for later output.
// From here: https://github.com/tabatkins/railroad-diagrams
//
//go:embed assets/*
var assets embed.FS
func generate(productions map[string]*production, n ebnf.Node) (s string) {
switch n := n.(type) {
case *ebnf.EBNF:
// NOTE: this HTML skeleton is an assumed reconstruction (the original markup
// was lost); it links the bundled railroad-diagrams assets.
s += `<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet" href="railroad-diagrams.css">
<script src="railroad-diagrams.js"></script>
</head>
<body>
`
for _, p := range n.Productions {
s += generate(productions, p) + "\n"
}
s += "</body></html>\n"
case *ebnf.Production:
if productions[n.Production].refs <= mergeRefThreshold {
break
}
s += `<h2 id="` + n.Production + `">` + n.Production + `</h2>` + "\n"
s += "<script>\nDiagram(" + generate(productions, n.Expression) + ").addTo();\n</script>\n"
case *ebnf.Expression:
s += "Choice(0, "
for i, a := range n.Alternatives {
if i > 0 {
s += ", "
}
s += generate(productions, a)
}
s += ")"
case *ebnf.SubExpression:
s += generate(productions, n.Expr)
if n.Lookahead != ebnf.LookaheadAssertionNone {
s = fmt.Sprintf(`Group(%s, "?%c")`, s, n.Lookahead)
}
case *ebnf.Sequence:
s += "Sequence("
for i, t := range n.Terms {
if i > 0 {
s += ", "
}
s += generate(productions, t)
}
s += ")"
case *ebnf.Term:
switch n.Repetition {
case "*":
s += "ZeroOrMore("
case "+":
s += "OneOrMore("
case "?":
s += "Optional("
}
switch {
case n.Name != "":
p := productions[n.Name]
if p.refs > mergeRefThreshold {
s += fmt.Sprintf("NonTerminal(%q, {href:\"#%s\"})", n.Name, n.Name)
} else {
s += generate(productions, p.Expression)
}
case n.Group != nil:
s += generate(productions, n.Group)
case n.Literal != "":
s += fmt.Sprintf("Terminal(%s)", n.Literal)
case n.Token != "":
s += fmt.Sprintf("NonTerminal(%q)", n.Token)
default:
panic(repr.String(n))
}
if n.Repetition != "" {
s += ")"
}
if n.Negation {
s = fmt.Sprintf(`Group(%s, "~")`, s)
}
default:
panic(repr.String(n))
}
return
}
func countProductions(productions map[string]*production, n ebnf.Node) (size int) {
switch n := n.(type) {
case *ebnf.EBNF:
for _, p := range n.Productions {
productions[p.Production] = &production{Production: p}
}
for _, p := range n.Productions {
countProductions(productions, p)
}
for _, p := range n.Productions {
if productions[p.Production].size <= mergeSizeThreshold {
productions[p.Production].refs = mergeRefThreshold
}
}
case *ebnf.Production:
productions[n.Production].size = countProductions(productions, n.Expression)
case *ebnf.Expression:
for _, a := range n.Alternatives {
size += countProductions(productions, a)
}
case *ebnf.SubExpression:
size += countProductions(productions, n.Expr)
case *ebnf.Sequence:
for _, t := range n.Terms {
size += countProductions(productions, t)
}
case *ebnf.Term:
if n.Name != "" {
productions[n.Name].refs++
size++
} else if n.Group != nil {
size += countProductions(productions, n.Group)
} else {
size++
}
default:
panic(repr.String(n))
}
return
}
func main() {
fmt.Fprintln(os.Stderr, "Generates railroad diagrams from a Participle EBNF grammar on stdin.")
fmt.Fprintln(os.Stderr, " (EBNF is available from .String() on your parser)")
fmt.Fprintln(os.Stderr, " (Use control-D to end input)")
help := flag.Bool("h", false, "output help and quit")
writeAssets := flag.Bool("w", false, "write css and js files")
outputFile := flag.String("o", "", "file to write html to")
flag.Parse()
if *help {
flag.PrintDefaults()
os.Exit(0)
}
ast, err := ebnf.Parse(os.Stdin)
if err != nil {
panic(err)
}
productions := map[string]*production{}
countProductions(productions, ast)
str := generate(productions, ast)
if *outputFile != "" {
err := os.WriteFile(*outputFile, []byte(str), 0644) // nolint
if err != nil {
panic(err)
}
if *writeAssets {
err := writeAssetFiles()
if err != nil {
panic(err)
}
} else {
fmt.Fprintln(os.Stderr, ">>> Copy railroad-diagrams.{css,js} from https://github.com/tabatkins/railroad-diagrams")
}
fmt.Fprintf(os.Stderr, ">>> File written: %s\n", *outputFile)
} else {
fmt.Println(str)
fmt.Fprintln(os.Stderr, ">>> Copy railroad-diagrams.{css,js} from https://github.com/tabatkins/railroad-diagrams")
}
}
func writeAssetFiles() (err error) {
files, err := assets.ReadDir("assets")
if err != nil {
return
}
for _, f := range files {
fileName := f.Name()
data, err := assets.ReadFile(fmt.Sprintf("assets/%s", fileName))
if err != nil {
return err
}
err = os.WriteFile(fileName, data, 0644) // nolint
if err != nil {
return err
}
fmt.Fprintf(os.Stderr, ">>> File written: %s\n", fileName)
}
return
}
golang-github-alecthomas-participle-v2-2.1.4/context.go 0000664 0000000 0000000 00000006770 15053003664 0023071 0 ustar 00root root 0000000 0000000 package participle
import (
"fmt"
"io"
"reflect"
"strings"
"github.com/alecthomas/participle/v2/lexer"
)
type contextFieldSet struct {
tokens []lexer.Token
strct reflect.Value
field structLexerField
fieldValue []reflect.Value
}
// Context for a single parse.
type parseContext struct {
lexer.PeekingLexer
depth int
trace io.Writer
deepestError error
deepestErrorDepth int
lookahead int
caseInsensitive map[lexer.TokenType]bool
apply []*contextFieldSet
allowTrailing bool
}
func newParseContext(lex *lexer.PeekingLexer, lookahead int, caseInsensitive map[lexer.TokenType]bool) parseContext {
return parseContext{
PeekingLexer: *lex,
caseInsensitive: caseInsensitive,
lookahead: lookahead,
}
}
func (p *parseContext) DeepestError(err error) error {
if p.PeekingLexer.Cursor() >= p.deepestErrorDepth {
return err
}
if p.deepestError != nil {
return p.deepestError
}
return err
}
// Defer adds a function to be applied once a branch has been picked.
func (p *parseContext) Defer(tokens []lexer.Token, strct reflect.Value, field structLexerField, fieldValue []reflect.Value) {
p.apply = append(p.apply, &contextFieldSet{tokens, strct, field, fieldValue})
}
// Apply deferred functions.
func (p *parseContext) Apply() error {
for _, apply := range p.apply {
if err := setField(apply.tokens, apply.strct, apply.field, apply.fieldValue); err != nil {
return err
}
}
p.apply = nil
return nil
}
// Branch accepts the branch as the correct branch.
func (p *parseContext) Accept(branch *parseContext) {
p.apply = append(p.apply, branch.apply...)
p.PeekingLexer = branch.PeekingLexer
if branch.deepestErrorDepth >= p.deepestErrorDepth {
p.deepestErrorDepth = branch.deepestErrorDepth
p.deepestError = branch.deepestError
}
}
// Branch starts a new lookahead branch.
func (p *parseContext) Branch() *parseContext {
branch := &parseContext{}
*branch = *p
branch.apply = nil
return branch
}
func (p *parseContext) MaybeUpdateError(err error) {
if p.PeekingLexer.Cursor() >= p.deepestErrorDepth {
p.deepestError = err
p.deepestErrorDepth = p.PeekingLexer.Cursor()
}
}
// Stop returns true if parsing should terminate after the given "branch" failed to match.
//
// Additionally, track the deepest error in the branch - the deeper the error, the more useful it usually is.
// It could already be the deepest error in the branch (only if deeper than current parent context deepest),
// or it could be "err", the latest error on the branch (even if same depth; the lexer holds the position).
func (p *parseContext) Stop(err error, branch *parseContext) bool {
if branch.deepestErrorDepth > p.deepestErrorDepth {
p.deepestError = branch.deepestError
p.deepestErrorDepth = branch.deepestErrorDepth
} else if branch.PeekingLexer.Cursor() >= p.deepestErrorDepth {
p.deepestError = err
p.deepestErrorDepth = maxInt(branch.PeekingLexer.Cursor(), branch.deepestErrorDepth)
}
if !p.hasInfiniteLookahead() && branch.PeekingLexer.Cursor() > p.PeekingLexer.Cursor()+p.lookahead {
p.Accept(branch)
return true
}
return false
}
func (p *parseContext) hasInfiniteLookahead() bool { return p.lookahead < 0 }
func (p *parseContext) printTrace(n node) func() {
if p.trace != nil {
tok := p.PeekingLexer.Peek()
fmt.Fprintf(p.trace, "%s%q %s\n", strings.Repeat(" ", p.depth*2), tok, n.GoString())
p.depth += 1
return func() { p.depth -= 1 }
}
return func() {}
}
func maxInt(a, b int) int {
if a > b {
return a
}
return b
}
golang-github-alecthomas-participle-v2-2.1.4/doc.go 0000664 0000000 0000000 00000004124 15053003664 0022141 0 ustar 00root root 0000000 0000000 // Package participle constructs parsers from definitions in struct tags and parses directly into
// those structs. The approach is philosophically similar to how other marshallers work in Go,
// "unmarshalling" an instance of a grammar into a struct.
//
// The supported annotation syntax is:
//
// - `@<expr>` Capture expression into the field.
// - `@@` Recursively capture using the field's own type.
// - `<identifier>` Match named lexer token.
// - `( ... )` Group.
// - `"..."` Match the literal (note that the lexer must emit tokens matching this literal exactly).
// - `"...":<identifier>` Match the literal, specifying the exact lexer token type to match.
// - `<expr> <expr> ...` Match expressions.
// - `<expr> | <expr>` Match one of the alternatives.
//
// The following modifiers can be used after any expression:
//
// - `*` Expression can match zero or more times.
// - `+` Expression must match one or more times.
// - `?` Expression can match zero or once.
// - `!` Require a non-empty match (this is useful with a sequence of optional matches eg. `("a"? "b"? "c"?)!`).
//
// Here's an example of an EBNF grammar.
//
// type Group struct {
// Expression *Expression `"(" @@ ")"`
// }
//
// type Option struct {
// Expression *Expression `"[" @@ "]"`
// }
//
// type Repetition struct {
// Expression *Expression `"{" @@ "}"`
// }
//
// type Literal struct {
// Start string `@String` // lexer.Lexer token "String"
// End string `("…" @String)?`
// }
//
// type Term struct {
// Name string ` @Ident`
// Literal *Literal `| @@`
// Group *Group `| @@`
// Option *Option `| @@`
// Repetition *Expression `| "(" @@ ")"`
// }
//
// type Sequence struct {
// Terms []*Term `@@+`
// }
//
// type Expression struct {
// Alternatives []*Sequence `@@ ("|" @@)*`
// }
//
// type Expressions []*Expression
//
// type Production struct {
// Name string `@Ident "="`
// Expressions Expressions `@@+ "."`
// }
//
// type EBNF struct {
// Productions []*Production `@@*`
// }
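//
// A parser for this grammar can then be built and used like so (a minimal
// sketch; the input string and error handling are illustrative):
//
//	parser := participle.MustBuild[EBNF]()
//	ast, err := parser.ParseString("", `Value = name | "literal" .`)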
package participle
golang-github-alecthomas-participle-v2-2.1.4/ebnf.go 0000664 0000000 0000000 00000006024 15053003664 0022307 0 ustar 00root root 0000000 0000000 package participle
import (
"fmt"
"strings"
)
// String returns the EBNF for the grammar.
//
// Productions are always upper cased. Lexer tokens are always lower case.
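//
// For example, a struct named "Value" with a single field captured via
// `@Ident` would render roughly as (illustrative):
//
//	Value = <ident> .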
func (p *Parser[G]) String() string {
return ebnf(p.typeNodes[p.rootType])
}
type ebnfp struct {
name string
out string
}
func ebnf(n node) string {
outp := []*ebnfp{}
switch n.(type) {
case *strct:
buildEBNF(true, n, map[node]bool{}, nil, &outp)
out := []string{}
for _, p := range outp {
out = append(out, fmt.Sprintf("%s = %s .", p.name, p.out))
}
return strings.Join(out, "\n")
default:
out := &ebnfp{}
buildEBNF(true, n, map[node]bool{}, out, &outp)
return out.out
}
}
func buildEBNF(root bool, n node, seen map[node]bool, p *ebnfp, outp *[]*ebnfp) {
switch n := n.(type) {
case *disjunction:
if !root {
p.out += "("
}
for i, next := range n.nodes {
if i > 0 {
p.out += " | "
}
buildEBNF(false, next, seen, p, outp)
}
if !root {
p.out += ")"
}
case *union:
name := strings.ToUpper(n.typ.Name()[:1]) + n.typ.Name()[1:]
if p != nil {
p.out += name
}
if seen[n] {
return
}
p = &ebnfp{name: name}
*outp = append(*outp, p)
seen[n] = true
for i, next := range n.disjunction.nodes {
if i > 0 {
p.out += " | "
}
buildEBNF(false, next, seen, p, outp)
}
case *custom:
name := strings.ToUpper(n.typ.Name()[:1]) + n.typ.Name()[1:]
p.out += name
case *strct:
name := strings.ToUpper(n.typ.Name()[:1]) + n.typ.Name()[1:]
if p != nil {
p.out += name
}
if seen[n] {
return
}
seen[n] = true
p = &ebnfp{name: name}
*outp = append(*outp, p)
buildEBNF(true, n.expr, seen, p, outp)
case *sequence:
group := n.next != nil && !root
if group {
p.out += "("
}
for n != nil {
buildEBNF(false, n.node, seen, p, outp)
n = n.next
if n != nil {
p.out += " "
}
}
if group {
p.out += ")"
}
case *parseable:
p.out += n.t.Name()
case *capture:
buildEBNF(false, n.node, seen, p, outp)
case *reference:
p.out += "<" + strings.ToLower(n.identifier) + ">"
case *negation:
p.out += "~"
buildEBNF(false, n.node, seen, p, outp)
case *literal:
p.out += fmt.Sprintf("%q", n.s)
case *group:
if child, ok := n.expr.(*group); ok && child.mode == groupMatchOnce {
buildEBNF(false, child.expr, seen, p, outp)
} else if child, ok := n.expr.(*capture); ok {
if grandchild, ok := child.node.(*group); ok && grandchild.mode == groupMatchOnce {
buildEBNF(false, grandchild.expr, seen, p, outp)
} else {
buildEBNF(false, n.expr, seen, p, outp)
}
} else {
buildEBNF(false, n.expr, seen, p, outp)
}
switch n.mode {
case groupMatchNonEmpty:
p.out += "!"
case groupMatchZeroOrOne:
p.out += "?"
case groupMatchZeroOrMore:
p.out += "*"
case groupMatchOneOrMore:
p.out += "+"
case groupMatchOnce:
}
case *lookaheadGroup:
if !n.negative {
p.out += "(?= "
} else {
p.out += "(?! "
}
buildEBNF(true, n.expr, seen, p, outp)
p.out += ")"
default:
panic(fmt.Sprintf("unsupported node type %T", n))
}
}
golang-github-alecthomas-participle-v2-2.1.4/ebnf/ 0000775 0000000 0000000 00000000000 15053003664 0021756 5 ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/ebnf/ebnf.go 0000664 0000000 0000000 00000006753 15053003664 0023232 0 ustar 00root root 0000000 0000000 // Package ebnf contains the AST and parser for parsing the form of EBNF produced by Participle.
//
// The self-referential EBNF is:
//
// EBNF = Production* .
// Production = <ident> "=" Expression "." .
// Expression = Sequence ("|" Sequence)* .
// SubExpression = "(" ("?!" | "?=")? Expression ")" .
// Sequence = Term+ .
// Term = "~"? ( | | ("<" ">") | SubExpression) ("*" | "+" | "?" | "!")? .
package ebnf
import (
"fmt"
"io"
"github.com/alecthomas/participle/v2"
)
var parser = participle.MustBuild[EBNF]()
// A Node in the EBNF grammar.
type Node interface {
sealed()
}
var _ Node = &Term{}
// Term in the EBNF grammar.
type Term struct {
Negation bool `@("~")?`
Name string `( @Ident`
Literal string ` | @String`
Token string ` | "<" @Ident ">"`
Group *SubExpression ` | @@ )`
Repetition string `@("*" | "+" | "?" | "!")?`
}
func (t *Term) sealed() {}
func (t *Term) String() string {
switch {
case t.Name != "":
return t.Name + t.Repetition
case t.Literal != "":
return t.Literal + t.Repetition
case t.Token != "":
return "<" + t.Token + ">" + t.Repetition
case t.Group != nil:
return t.Group.String() + t.Repetition
default:
panic("??")
}
}
// LookaheadAssertion enum.
type LookaheadAssertion rune
func (l *LookaheadAssertion) sealed() {}
func (l *LookaheadAssertion) Capture(tokens []string) error { // nolint
rn := tokens[0][0]
switch rn {
case '!', '=':
*l = LookaheadAssertion(rn)
default:
panic(rn)
}
return nil
}
// Lookahead assertion enums.
const (
LookaheadAssertionNone LookaheadAssertion = 0
LookaheadAssertionNegative LookaheadAssertion = '!'
LookaheadAssertionPositive LookaheadAssertion = '='
)
var _ Node = &SubExpression{}
// SubExpression is an expression inside parentheses ( ... )
type SubExpression struct {
Lookahead LookaheadAssertion `"(" ("?" @("!" | "="))?`
Expr *Expression `@@ ")"`
}
func (s *SubExpression) sealed() {}
func (s *SubExpression) String() string {
out := "("
if s.Lookahead != LookaheadAssertionNone {
out += "?" + string(s.Lookahead)
}
out += s.Expr.String() + ")"
return out
}
var _ Node = &Sequence{}
// A Sequence of terms.
type Sequence struct {
Terms []*Term `@@+`
}
func (s *Sequence) sealed() {}
func (s *Sequence) String() (out string) {
for i, term := range s.Terms {
if i > 0 {
out += " "
}
out += term.String()
}
return
}
var _ Node = &Expression{}
// Expression is a set of alternatives separated by "|" in the EBNF.
type Expression struct {
Alternatives []*Sequence `@@ ( "|" @@ )*`
}
func (e *Expression) sealed() {}
func (e *Expression) String() (out string) {
for i, seq := range e.Alternatives {
if i > 0 {
out += " | "
}
out += seq.String()
}
return
}
var _ Node = &Production{}
// Production of the grammar.
type Production struct {
Production string `@Ident "="`
Expression *Expression `@@ "."`
}
func (p *Production) sealed() {}
var _ Node = &EBNF{}
// EBNF itself.
type EBNF struct {
Productions []*Production `@@*`
}
func (e *EBNF) sealed() {}
func (e *EBNF) String() (out string) {
for i, production := range e.Productions {
out += fmt.Sprintf("%s = %s .", production.Production, production.Expression)
if i < len(e.Productions)-1 {
out += "\n"
}
}
return
}
// ParseString string into EBNF.
func ParseString(ebnf string) (*EBNF, error) {
return parser.ParseString("", ebnf)
}
// Parse io.Reader into EBNF.
func Parse(r io.Reader) (*EBNF, error) {
return parser.Parse("", r)
}
golang-github-alecthomas-participle-v2-2.1.4/ebnf/ebnf_test.go 0000664 0000000 0000000 00000000400 15053003664 0024250 0 ustar 00root root 0000000 0000000 package ebnf
import (
"testing"
require "github.com/alecthomas/assert/v2"
)
func TestEBNF(t *testing.T) {
input := parser.String()
t.Log(input)
ast, err := ParseString(input)
require.NoError(t, err, input)
require.Equal(t, input, ast.String())
}
golang-github-alecthomas-participle-v2-2.1.4/ebnf_test.go 0000664 0000000 0000000 00000003472 15053003664 0023352 0 ustar 00root root 0000000 0000000 package participle_test
import (
"strings"
"testing"
require "github.com/alecthomas/assert/v2"
"github.com/alecthomas/participle/v2"
)
func TestEBNF(t *testing.T) {
parser := mustTestParser[EBNF](t)
expected := `
EBNF = Production* .
Production = <ident> "=" Expression+ "." .
Expression = Sequence ("|" Sequence)* .
Sequence = Term+ .
Term = <ident> | Literal | Range | Group | LookaheadGroup | EBNFOption | Repetition | Negation .
Literal = <string> .
Range = <string> "…" <string> .
Group = "(" Expression ")" .
LookaheadGroup = "(" "?" ("=" | "!") Expression ")" .
EBNFOption = "[" Expression "]" .
Repetition = "{" Expression "}" .
Negation = "!" Expression .
`
require.Equal(t, strings.TrimSpace(expected), parser.String())
}
func TestEBNF_Other(t *testing.T) {
type Grammar struct {
PositiveLookahead string ` (?= 'good') @Ident`
NegativeLookahead string `| (?! 'bad' | "worse") @Ident`
Negation string `| !("anything" | 'but')`
}
parser := mustTestParser[Grammar](t)
expected := `Grammar = ((?= "good") <ident>) | ((?! "bad" | "worse") <ident>) | ~("anything" | "but") .`
require.Equal(t, expected, parser.String())
}
type (
EBNFUnion interface{ ebnfUnion() }
EBNFUnionA struct {
A string `@Ident`
}
EBNFUnionB struct {
B string `@String`
}
EBNFUnionC struct {
C string `@Float`
}
)
func (EBNFUnionA) ebnfUnion() {}
func (EBNFUnionB) ebnfUnion() {}
func (EBNFUnionC) ebnfUnion() {}
func TestEBNF_Union(t *testing.T) {
type Grammar struct {
TheUnion EBNFUnion `@@`
}
parser := mustTestParser[Grammar](t, participle.Union[EBNFUnion](EBNFUnionA{}, EBNFUnionB{}, EBNFUnionC{}))
require.Equal(t,
strings.TrimSpace(`
Grammar = EBNFUnion .
EBNFUnion = EBNFUnionA | EBNFUnionB | EBNFUnionC .
EBNFUnionA = <ident> .
EBNFUnionB = <string> .
EBNFUnionC = <float> .
`),
parser.String())
}
golang-github-alecthomas-participle-v2-2.1.4/error.go 0000664 0000000 0000000 00000006037 15053003664 0022532 0 ustar 00root root 0000000 0000000 package participle
import (
"fmt"
"github.com/alecthomas/participle/v2/lexer"
)
// Error represents an error while parsing.
//
// The format of an Error is in the form "[<filename>:][<line>:<col>:] <message>".
//
// The error will contain positional information if available.
type Error interface {
error
// Unadorned message.
Message() string
// Closest position to error location.
Position() lexer.Position
}
// FormatError formats an error in the form "[<filename>:][<line>:<col>:] <message>"
func FormatError(err Error) string {
msg := ""
pos := err.Position()
if pos.Filename != "" {
msg += pos.Filename + ":"
}
if pos.Line != 0 || pos.Column != 0 {
msg += fmt.Sprintf("%d:%d:", pos.Line, pos.Column)
}
if msg != "" {
msg += " " + err.Message()
} else {
msg = err.Message()
}
return msg
}
// UnexpectedTokenError is returned by Parse when an unexpected token is encountered.
//
// This is useful for composing parsers in order to detect when a sub-parser has terminated.
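//
// For example, a caller composing parsers might treat a particular boundary
// token as a clean stop rather than a failure (a sketch; the boundary token
// is illustrative):
//
//	var ute *participle.UnexpectedTokenError
//	if errors.As(err, &ute) && ute.Unexpected.Value == "}" {
//		// Sub-parser stopped at the enclosing block's closing brace.
//	}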
type UnexpectedTokenError struct {
Unexpected lexer.Token
Expect string
expectNode node // Usable instead of Expect, delays creating the string representation until necessary
}
func (u *UnexpectedTokenError) Error() string { return FormatError(u) }
func (u *UnexpectedTokenError) Message() string { // nolint: golint
var expected string
if u.expectNode != nil {
expected = fmt.Sprintf(" (expected %s)", u.expectNode)
} else if u.Expect != "" {
expected = fmt.Sprintf(" (expected %s)", u.Expect)
}
return fmt.Sprintf("unexpected token %q%s", u.Unexpected, expected)
}
func (u *UnexpectedTokenError) Position() lexer.Position { return u.Unexpected.Pos } // nolint: golint
// ParseError is returned when a parse error occurs.
//
// It is useful for differentiating between parse errors and other errors such
// as lexing and IO errors.
type ParseError struct {
Msg string
Pos lexer.Position
}
func (p *ParseError) Error() string { return FormatError(p) }
func (p *ParseError) Message() string { return p.Msg }
func (p *ParseError) Position() lexer.Position { return p.Pos }
// Errorf creates a new Error at the given position.
func Errorf(pos lexer.Position, format string, args ...interface{}) Error {
return &ParseError{Msg: fmt.Sprintf(format, args...), Pos: pos}
}
type wrappingParseError struct {
err error
ParseError
}
func (w *wrappingParseError) Unwrap() error { return w.err }
// Wrapf attempts to wrap an existing error in a new message.
//
// If "err" is a participle.Error, its positional information will be used and
// "pos" will be ignored.
//
// The returned error implements the Unwrap() method supported by the errors package.
func Wrapf(pos lexer.Position, err error, format string, args ...interface{}) Error {
var msg string
if perr, ok := err.(Error); ok {
pos = perr.Position()
msg = fmt.Sprintf("%s: %s", fmt.Sprintf(format, args...), perr.Message())
} else {
msg = fmt.Sprintf("%s: %s", fmt.Sprintf(format, args...), err.Error())
}
return &wrappingParseError{err: err, ParseError: ParseError{Msg: msg, Pos: pos}}
}
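// A minimal usage sketch of Errorf and Wrapf (positions and messages are
// illustrative only):
//
// err := participle.Errorf(lexer.Position{Line: 1, Column: 1}, "unexpected %q", "x")
// wrapped := participle.Wrapf(lexer.Position{}, err, "parsing config")
// // wrapped.Error() == `1:1: parsing config: unexpected "x"`
// // errors.Unwrap(wrapped) == err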
golang-github-alecthomas-participle-v2-2.1.4/error_test.go 0000664 0000000 0000000 00000005030 15053003664 0023561 0 ustar 00root root 0000000 0000000 package participle_test
import (
"errors"
"testing"
require "github.com/alecthomas/assert/v2"
"github.com/alecthomas/participle/v2"
"github.com/alecthomas/participle/v2/lexer"
)
func TestErrorReporting(t *testing.T) {
type cls struct {
Visibility string `@"public"?`
Class string `"class" @Ident`
Bases []string `('(' @Ident (',' @Ident)+ ')')?`
}
type union struct {
Visibility string `@"public"?`
Union string `"union" @Ident`
}
type decl struct {
Class *cls `( @@`
Union *union ` | @@ )`
}
type grammar struct {
Decls []*decl `( @@ ";" )*`
}
p := mustTestParser[grammar](t, participle.UseLookahead(5))
ast, err := p.ParseString("", `public class A(B, C); class D; public union A;`)
require.NoError(t, err)
require.Equal(t, &grammar{Decls: []*decl{
{Class: &cls{Visibility: "public", Class: "A", Bases: []string{"B", "C"}}},
{Class: &cls{Class: "D"}},
{Union: &union{Visibility: "public", Union: "A"}},
}}, ast)
_, err = p.ParseString("", `public struct Bar;`)
require.EqualError(t, err, `1:8: unexpected token "struct" (expected "union" <ident>)`)
_, err = p.ParseString("", `public class 1;`)
require.EqualError(t, err, `1:14: unexpected token "1" (expected ("(" <ident> ("," <ident>)+ ")")?)`)
_, err = p.ParseString("", `public class A(B,C,);`)
require.EqualError(t, err, `1:20: unexpected token ")" (expected <ident>)`)
}
func TestMoreThanOneErrors(t *testing.T) {
type unionMatchAtLeastOnce struct {
Ident string `( @Ident `
String string `| @String+ `
Float float64 `| @Float )`
}
type union struct {
Ident string `( @Ident `
String string `| @String `
Float float64 `| @Float )`
}
pAtLeastOnce := mustTestParser[unionMatchAtLeastOnce](t, participle.Unquote("String"))
p := mustTestParser[union](t, participle.Unquote("String"))
ast, err := pAtLeastOnce.ParseString("", `"a string" "two strings"`)
require.NoError(t, err)
require.Equal(t, &unionMatchAtLeastOnce{String: "a stringtwo strings"}, ast)
_, err = p.ParseString("", `102`)
require.EqualError(t, err, `1:1: unexpected token "102"`)
_, err = pAtLeastOnce.ParseString("", `102`)
// ensure we don't get a "+1:1: sub-expression + must match at least once" error
require.EqualError(t, err, `1:1: unexpected token "102"`)
}
func TestErrorWrap(t *testing.T) {
expected := errors.New("badbad")
err := participle.Wrapf(lexer.Position{Line: 1, Column: 1}, expected, "bad: %s", "thing")
require.Equal(t, expected, errors.Unwrap(err))
require.Equal(t, "1:1: bad: thing: badbad", err.Error())
}
golang-github-alecthomas-participle-v2-2.1.4/go.mod 0000664 0000000 0000000 00000000306 15053003664 0022151 0 ustar 00root root 0000000 0000000 module github.com/alecthomas/participle/v2
go 1.18
require (
github.com/alecthomas/assert/v2 v2.11.0
github.com/alecthomas/repr v0.4.0
)
require github.com/hexops/gotextdiff v1.0.3 // indirect
golang-github-alecthomas-participle-v2-2.1.4/go.sum 0000664 0000000 0000000 00000001306 15053003664 0022177 0 ustar 00root root 0000000 0000000 github.com/alecthomas/assert/v2 v2.6.0 h1:o3WJwILtexrEUk3cUVal3oiQY2tfgr/FHWiz/v2n4FU=
github.com/alecthomas/assert/v2 v2.6.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0=
github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc=
github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
golang-github-alecthomas-participle-v2-2.1.4/grammar.go 0000664 0000000 0000000 00000025266 15053003664 0023034 0 ustar 00root root 0000000 0000000 package participle
import (
"fmt"
"reflect"
"text/scanner"
"github.com/alecthomas/participle/v2/lexer"
)
type generatorContext struct {
lexer.Definition
typeNodes map[reflect.Type]node
symbolsToIDs map[lexer.TokenType]string
}
func newGeneratorContext(lex lexer.Definition) *generatorContext {
return &generatorContext{
Definition: lex,
typeNodes: map[reflect.Type]node{},
symbolsToIDs: lexer.SymbolsByRune(lex),
}
}
func (g *generatorContext) addUnionDefs(defs []unionDef) error {
unionNodes := make([]*union, len(defs))
for i, def := range defs {
if _, exists := g.typeNodes[def.typ]; exists {
return fmt.Errorf("duplicate definition for interface or union type %s", def.typ)
}
unionNode := &union{
unionDef: def,
disjunction: disjunction{nodes: make([]node, 0, len(def.members))},
}
g.typeNodes[def.typ], unionNodes[i] = unionNode, unionNode
}
for i, def := range defs {
unionNode := unionNodes[i]
for _, memberType := range def.members {
memberNode, err := g.parseType(memberType)
if err != nil {
return err
}
unionNode.disjunction.nodes = append(unionNode.disjunction.nodes, memberNode)
}
}
return nil
}
func (g *generatorContext) addCustomDefs(defs []customDef) error {
for _, def := range defs {
if _, exists := g.typeNodes[def.typ]; exists {
return fmt.Errorf("duplicate definition for interface or union type %s", def.typ)
}
g.typeNodes[def.typ] = &custom{typ: def.typ, parseFn: def.parseFn}
}
return nil
}
// Takes a type and builds a tree of nodes out of it.
func (g *generatorContext) parseType(t reflect.Type) (_ node, returnedError error) {
t = indirectType(t)
if n, ok := g.typeNodes[t]; ok {
if s, ok := n.(*strct); ok {
s.usages++
}
return n, nil
}
if t.Implements(parseableType) {
return &parseable{t.Elem()}, nil
}
if reflect.PtrTo(t).Implements(parseableType) {
return &parseable{t}, nil
}
switch t.Kind() { // nolint: exhaustive
case reflect.Slice, reflect.Ptr:
t = indirectType(t.Elem())
if t.Kind() != reflect.Struct {
return nil, fmt.Errorf("expected a struct but got %T", t)
}
fallthrough
case reflect.Struct:
slexer, err := lexStruct(t)
if err != nil {
return nil, err
}
out := newStrct(t)
g.typeNodes[t] = out // Ensure we avoid infinite recursion.
if slexer.NumField() == 0 {
return nil, fmt.Errorf("can not parse into empty struct %s", t)
}
defer decorate(&returnedError, func() string { return slexer.Field().Name })
e, err := g.parseDisjunction(slexer)
if err != nil {
return nil, err
}
if e == nil {
return nil, fmt.Errorf("no grammar found in %s", t)
}
if token, _ := slexer.Peek(); !token.EOF() {
return nil, fmt.Errorf("unexpected input %q", token.Value)
}
out.expr = e
return out, nil
}
return nil, fmt.Errorf("%s should be a struct or should implement the Parseable interface", t)
}
func (g *generatorContext) parseDisjunction(slexer *structLexer) (node, error) {
out := &disjunction{}
for {
n, err := g.parseSequence(slexer)
if err != nil {
return nil, err
}
if n == nil {
return nil, fmt.Errorf("alternative expression %d cannot be empty", len(out.nodes)+1)
}
out.nodes = append(out.nodes, n)
if token, _ := slexer.Peek(); token.Type != '|' {
break
}
_, err = slexer.Next() // |
if err != nil {
return nil, err
}
}
if len(out.nodes) == 1 {
return out.nodes[0], nil
}
return out, nil
}
func (g *generatorContext) parseSequence(slexer *structLexer) (node, error) {
head := &sequence{}
cursor := head
loop:
for {
if token, err := slexer.Peek(); err != nil {
return nil, err
} else if token.Type == lexer.EOF {
break loop
}
term, err := g.parseTerm(slexer, true)
if err != nil {
return nil, err
}
if term == nil {
break loop
}
if cursor.node == nil {
cursor.head = true
cursor.node = term
} else {
cursor.next = &sequence{node: term}
cursor = cursor.next
}
}
if head.node == nil {
return nil, nil
}
if head.next == nil {
return head.node, nil
}
return head, nil
}
func (g *generatorContext) parseTermNoModifiers(slexer *structLexer, allowUnknown bool) (node, error) {
t, err := slexer.Peek()
if err != nil {
return nil, err
}
switch t.Type {
case '@':
return g.parseCapture(slexer)
case scanner.String, scanner.RawString, scanner.Char:
return g.parseLiteral(slexer)
case '!', '~':
return g.parseNegation(slexer)
case '[':
return g.parseOptional(slexer)
case '{':
return g.parseRepetition(slexer)
case '(':
// Also handles (? used for lookahead groups
return g.parseGroup(slexer)
case scanner.Ident:
return g.parseReference(slexer)
case lexer.EOF:
_, _ = slexer.Next()
return nil, nil
default:
if allowUnknown {
return nil, nil
}
return nil, fmt.Errorf("unexpected token %v", t)
}
}
func (g *generatorContext) parseTerm(slexer *structLexer, allowUnknown bool) (node, error) {
out, err := g.parseTermNoModifiers(slexer, allowUnknown)
if err != nil {
return nil, err
}
return g.parseModifier(slexer, out)
}
// Parse modifiers: ?, *, + and/or !
func (g *generatorContext) parseModifier(slexer *structLexer, expr node) (node, error) {
out := &group{expr: expr}
t, err := slexer.Peek()
if err != nil {
return nil, err
}
switch t.Type {
case '!':
out.mode = groupMatchNonEmpty
case '+':
out.mode = groupMatchOneOrMore
case '*':
out.mode = groupMatchZeroOrMore
case '?':
out.mode = groupMatchZeroOrOne
default:
return expr, nil
}
_, _ = slexer.Next()
return out, nil
}
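// A sketch of how these modifiers appear in grammar struct tags (the field
// names are illustrative):
//
// type value struct {
//     Optional string   `@Ident?` // zero or one
//     Many     []string `@Ident*` // zero or more
//     Some     []string `@Ident+` // one or more
// }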
// @<expression> captures <expression> into the current field.
func (g *generatorContext) parseCapture(slexer *structLexer) (node, error) {
_, _ = slexer.Next()
token, err := slexer.Peek()
if err != nil {
return nil, err
}
field := slexer.Field()
if token.Type == '@' {
_, _ = slexer.Next()
n, err := g.parseType(field.Type)
if err != nil {
return nil, err
}
return &capture{field, n}, nil
}
ft := indirectType(field.Type)
if ft.Kind() == reflect.Struct && ft != tokenType && ft != tokensType && !implements(ft, captureType) && !implements(ft, textUnmarshalerType) {
return nil, fmt.Errorf("%s: structs can only be parsed with @@ or by implementing the Capture or encoding.TextUnmarshaler interfaces", ft)
}
n, err := g.parseTermNoModifiers(slexer, false)
if err != nil {
return nil, err
}
return &capture{field, n}, nil
}
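// For example, the tag `@Ident` captures the text of a matching Ident token
// into the field, while `@@` recursively parses and captures the field's own
// struct, union or custom type.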
// A reference in the form <identifier> refers to a named token from the lexer.
func (g *generatorContext) parseReference(slexer *structLexer) (node, error) { // nolint: interfacer
token, err := slexer.Next()
if err != nil {
return nil, err
}
if token.Type != scanner.Ident {
return nil, fmt.Errorf("expected identifier but got %q", token)
}
typ, ok := g.Symbols()[token.Value]
if !ok {
return nil, fmt.Errorf("unknown token type %q", token)
}
return &reference{typ: typ, identifier: token.Value}, nil
}
// [ <expression> ] optionally matches <expression>.
func (g *generatorContext) parseOptional(slexer *structLexer) (node, error) {
_, _ = slexer.Next() // [
disj, err := g.parseDisjunction(slexer)
if err != nil {
return nil, err
}
n := &group{expr: disj, mode: groupMatchZeroOrOne}
next, err := slexer.Next()
if err != nil {
return nil, err
}
if next.Type != ']' {
return nil, fmt.Errorf("expected ] but got %q", next)
}
return n, nil
}
// { <expression> } matches 0 or more repetitions of <expression>
func (g *generatorContext) parseRepetition(slexer *structLexer) (node, error) {
_, _ = slexer.Next() // {
disj, err := g.parseDisjunction(slexer)
if err != nil {
return nil, err
}
n := &group{expr: disj, mode: groupMatchZeroOrMore}
next, err := slexer.Next()
if err != nil {
return nil, err
}
if next.Type != '}' {
return nil, fmt.Errorf("expected } but got %q", next)
}
return n, nil
}
// ( <expression> ) groups a sub-expression
func (g *generatorContext) parseGroup(slexer *structLexer) (node, error) {
_, _ = slexer.Next() // (
peek, err := slexer.Peek()
if err != nil {
return nil, err
}
if peek.Type == '?' {
return g.subparseLookaheadGroup(slexer) // "(?" introduces a lookahead group
}
expr, err := g.subparseGroup(slexer)
if err != nil {
return nil, err
}
return &group{expr: expr}, nil
}
// (?[!=] <expression> ) requires that the grouped sub-expression either matches or doesn't match, without consuming it
func (g *generatorContext) subparseLookaheadGroup(slexer *structLexer) (node, error) {
_, _ = slexer.Next() // ? - the opening ( was already consumed in parseGroup
var negative bool
next, err := slexer.Next()
if err != nil {
return nil, err
}
switch next.Type {
case '=':
negative = false
case '!':
negative = true
default:
return nil, fmt.Errorf("expected = or ! but got %q", next)
}
expr, err := g.subparseGroup(slexer)
if err != nil {
return nil, err
}
return &lookaheadGroup{expr: expr, negative: negative}, nil
}
// helper parsing "<expression> )" to finish parsing groups or lookahead groups
func (g *generatorContext) subparseGroup(slexer *structLexer) (node, error) {
disj, err := g.parseDisjunction(slexer)
if err != nil {
return nil, err
}
next, err := slexer.Next() // )
if err != nil {
return nil, err
}
if next.Type != ')' {
return nil, fmt.Errorf("expected ) but got %q", next)
}
return disj, nil
}
// A token negation
//
// Accepts both the form !"some-literal" and !SomeNamedToken
func (g *generatorContext) parseNegation(slexer *structLexer) (node, error) {
_, _ = slexer.Next() // advance the parser since we have '!' right now.
next, err := g.parseTermNoModifiers(slexer, false)
if err != nil {
return nil, err
}
return &negation{next}, nil
}
// A literal string.
//
// Note that for this to match, the tokeniser must be able to produce this string. For example,
// if the tokeniser only produces individual characters but the literal is "hello", the literal will never match (and vice versa).
func (g *generatorContext) parseLiteral(lex *structLexer) (node, error) { // nolint: interfacer
token, err := lex.Next()
if err != nil {
return nil, err
}
s := token.Value
t := lexer.TokenType(-1)
token, err = lex.Peek()
if err != nil {
return nil, err
}
if token.Type == ':' {
_, _ = lex.Next()
token, err = lex.Next()
if err != nil {
return nil, err
}
if token.Type != scanner.Ident {
return nil, fmt.Errorf("expected identifier for literal type constraint but got %q", token)
}
var ok bool
t, ok = g.Symbols()[token.Value]
if !ok {
return nil, fmt.Errorf("unknown token type %q in literal type constraint", token)
}
}
return &literal{s: s, t: t, tt: g.symbolsToIDs[t]}, nil
}
func indirectType(t reflect.Type) reflect.Type {
if t.Kind() == reflect.Ptr || t.Kind() == reflect.Slice {
return indirectType(t.Elem())
}
return t
}
func implements(t, i reflect.Type) bool {
return t.Implements(i) || reflect.PtrTo(t).Implements(i)
}
golang-github-alecthomas-participle-v2-2.1.4/grammar_test.go 0000664 0000000 0000000 00000003375 15053003664 0024070 0 ustar 00root root 0000000 0000000 package participle_test
import (
"testing"
require "github.com/alecthomas/assert/v2"
"github.com/alecthomas/participle/v2"
)
func TestBuild_Errors_Negation(t *testing.T) {
type grammar struct {
Whatever string `'a' | ! | 'b'`
}
_, err := participle.Build[grammar]()
require.EqualError(t, err, "Whatever: unexpected token |")
}
func TestBuild_Errors_Capture(t *testing.T) {
type grammar struct {
Whatever string `'a' | @ | 'b'`
}
_, err := participle.Build[grammar]()
require.EqualError(t, err, "Whatever: unexpected token |")
}
func TestBuild_Errors_UnclosedGroup(t *testing.T) {
type grammar struct {
Whatever string `'a' | ('b' | 'c'`
}
_, err := participle.Build[grammar]()
require.EqualError(t, err, `Whatever: expected ) but got ""`)
}
func TestBuild_Errors_LookaheadGroup(t *testing.T) {
type grammar struct {
Whatever string `'a' | (?? 'what') | 'b'`
}
_, err := participle.Build[grammar]()
require.EqualError(t, err, `Whatever: expected = or ! but got "?"`)
}
func TestBuild_Colon_OK(t *testing.T) {
type grammar struct {
TokenTypeTest bool ` 'TokenTypeTest' : Ident`
DoubleCapture string `| 'DoubleCapture' ":" @Ident`
SinglePresent bool `| 'SinglePresent' ':' Ident`
SingleCapture string `| 'SingleCapture' ':' @Ident`
}
parser, err := participle.Build[grammar]()
require.NoError(t, err)
require.Equal(t, `Grammar = "TokenTypeTest"`+
` | ("DoubleCapture" ":" )`+
` | ("SinglePresent" ":" )`+
` | ("SingleCapture" ":" ) .`, parser.String())
}
func TestBuild_Colon_MissingTokenType(t *testing.T) {
type grammar struct {
Key string `'name' : @Ident`
}
_, err := participle.Build[grammar]()
require.EqualError(t, err, `Key: expected identifier for literal type constraint but got "@"`)
}
golang-github-alecthomas-participle-v2-2.1.4/lexer/ 0000775 0000000 0000000 00000000000 15053003664 0022163 5 ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/lexer/api.go 0000664 0000000 0000000 00000010755 15053003664 0023273 0 ustar 00root root 0000000 0000000 package lexer
import (
"fmt"
"io"
"strings"
"unicode/utf8"
)
type TokenType int
const (
// EOF represents an end of file.
EOF TokenType = -(iota + 1)
)
// EOFToken creates a new EOF token at the given position.
func EOFToken(pos Position) Token {
return Token{Type: EOF, Pos: pos}
}
// Definition is the main entry point for lexing.
type Definition interface {
// Symbols returns a map of symbolic names to the corresponding pseudo-runes for those symbols.
// This is the same approach as used by text/scanner. For example, "EOF" might have the rune
// value of -1, "Ident" might be -2, and so on.
Symbols() map[string]TokenType
// Lex an io.Reader.
Lex(filename string, r io.Reader) (Lexer, error)
}
// StringDefinition is an optional interface lexer Definitions can implement
// to offer a fast path for lexing strings.
type StringDefinition interface {
LexString(filename string, input string) (Lexer, error)
}
// BytesDefinition is an optional interface lexer Definitions can implement
// to offer a fast path for lexing byte slices.
type BytesDefinition interface {
LexBytes(filename string, input []byte) (Lexer, error)
}
// A Lexer returns tokens from a source.
type Lexer interface {
// Next consumes and returns the next token.
Next() (Token, error)
}
// SymbolsByRune returns a map of lexer symbol names keyed by rune.
func SymbolsByRune(def Definition) map[TokenType]string {
symbols := def.Symbols()
out := make(map[TokenType]string, len(symbols))
for s, r := range symbols {
out[r] = s
}
return out
}
// NameOfReader attempts to retrieve the filename of a reader.
func NameOfReader(r interface{}) string {
if nr, ok := r.(interface{ Name() string }); ok {
return nr.Name()
}
return ""
}
// Must takes the result of a Definition constructor call and returns the definition, but panics
// if the constructor returned an error.
//
// eg.
//
// lex = lexer.Must(lexer.New(rules))
func Must(def Definition, err error) Definition {
if err != nil {
panic(err)
}
return def
}
// ConsumeAll reads all tokens from a Lexer.
func ConsumeAll(lexer Lexer) ([]Token, error) {
tokens := make([]Token, 0, 1024)
for {
token, err := lexer.Next()
if err != nil {
return nil, err
}
tokens = append(tokens, token)
if token.Type == EOF {
return tokens, nil
}
}
}
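// A minimal usage sketch (assuming "def" is some lexer.Definition and
// strings has been imported):
//
// lex, err := def.Lex("example.bas", strings.NewReader(`print "hello"`))
// if err != nil { /* handle error */ }
// tokens, err := lexer.ConsumeAll(lex)
// // On success, "tokens" always ends with the EOF token.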
// Position of a token.
type Position struct {
Filename string
Offset int
Line int
Column int
}
// Advance the Position based on the number of characters and newlines in "span".
func (p *Position) Advance(span string) {
p.Offset += len(span)
lines := strings.Count(span, "\n")
p.Line += lines
// Update column.
if lines == 0 {
p.Column += utf8.RuneCountInString(span)
} else {
p.Column = utf8.RuneCountInString(span[strings.LastIndex(span, "\n"):])
}
}
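// A worked sketch of Advance over a span containing a newline:
//
// p := lexer.Position{Line: 1, Column: 1}
// p.Advance("ab\ncd")
// // p.Offset == 5, p.Line == 2, p.Column == 3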
// Add returns a new Position that is the sum of this position and "pos".
//
// This is useful when parsing values from a parent grammar.
func (p Position) Add(pos Position) Position {
p.Line += pos.Line - 1
if pos.Line > 1 {
p.Column = pos.Column
} else {
p.Column += pos.Column - 1
}
p.Offset += pos.Offset
return p
}
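// For example (values illustrative):
//
// parent := lexer.Position{Line: 3, Column: 5, Offset: 40}
// child := lexer.Position{Line: 1, Column: 4, Offset: 3}
// _ = parent.Add(child) // Position{Line: 3, Column: 8, Offset: 43}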
func (p Position) GoString() string {
return fmt.Sprintf("Position{Filename: %q, Offset: %d, Line: %d, Column: %d}",
p.Filename, p.Offset, p.Line, p.Column)
}
func (p Position) String() string {
filename := p.Filename
if filename == "" {
return fmt.Sprintf("%d:%d", p.Line, p.Column)
}
return fmt.Sprintf("%s:%d:%d", filename, p.Line, p.Column)
}
// A Token returned by a Lexer.
type Token struct {
// Type of token. This is the value keyed by symbol as returned by Definition.Symbols().
Type TokenType
Value string
Pos Position
}
// EOF returns true if this Token is an EOF token.
func (t Token) EOF() bool {
return t.Type == EOF
}
func (t Token) String() string {
if t.EOF() {
return ""
}
return t.Value
}
func (t Token) GoString() string {
if t.Pos == (Position{}) {
return fmt.Sprintf("Token{%d, %q}", t.Type, t.Value)
}
return fmt.Sprintf("Token@%s{%d, %q}", t.Pos.String(), t.Type, t.Value)
}
// MakeSymbolTable builds a lookup table for checking token ID existence.
//
// For each symbolic name in "types", the returned map will contain the corresponding token ID as a key.
func MakeSymbolTable(def Definition, types ...string) (map[TokenType]bool, error) {
symbols := def.Symbols()
table := make(map[TokenType]bool, len(types))
for _, symbol := range types {
rn, ok := symbols[symbol]
if !ok {
return nil, fmt.Errorf("lexer does not support symbol %q", symbol)
}
table[rn] = true
}
return table, nil
}
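// A minimal usage sketch (assuming "def" defines an "Ident" symbol and "tok"
// is a lexer.Token):
//
// idents, err := lexer.MakeSymbolTable(def, "Ident")
// if err != nil { /* handle error */ }
// if idents[tok.Type] { /* tok is an Ident */ }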
golang-github-alecthomas-participle-v2-2.1.4/lexer/doc.go 0000664 0000000 0000000 00000002473 15053003664 0023265 0 ustar 00root root 0000000 0000000 // Package lexer defines interfaces and implementations used by Participle to perform lexing.
//
// The primary interfaces are Definition and Lexer. There are two concrete implementations
// included. The first is one based on Go's text/scanner package. The second is Participle's
// default stateful/modal lexer.
//
// The stateful lexer is based heavily on the approach used by Chroma (and Pygments).
//
// It is a state machine defined by a map of rules keyed by state. Each rule
// is a named regex and optional operation to apply when the rule matches.
//
// As a convenience, any Rule starting with a lowercase letter will be elided from output.
//
// Lexing starts in the "Root" group. Each rule is matched in order, with the first
// successful match producing a lexeme. If the matching rule has an associated Action
// it will be executed.
//
// A state change can be introduced with the Action `Push(state)`. `Pop()` will
// return to the previous state.
//
// To reuse rules from another state, use `Include(state)`.
//
// As a special case, regexes containing backrefs in the form \N (where N is a digit)
// will match the corresponding capture group from the immediate parent group. This
// can be used to parse, among other things, heredocs.
//
// See the README, example and tests in this package for details.
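// A minimal sketch of a stateful definition (the rule names and patterns are
// illustrative):
//
// var def = lexer.MustStateful(lexer.Rules{
//     "Root": {
//         {"String", `"`, lexer.Push("String")},
//         {"whitespace", `\s+`, nil}, // lowercase name: elided from output
//     },
//     "String": {
//         {"Escaped", `\\.`, nil},
//         {"StringEnd", `"`, lexer.Pop()},
//         {"Chars", `[^"\\]+`, nil},
//     },
// })
//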
package lexer
golang-github-alecthomas-participle-v2-2.1.4/lexer/errors.go 0000664 0000000 0000000 00000002277 15053003664 0024036 0 ustar 00root root 0000000 0000000 package lexer
import "fmt"
// This file exists to break circular imports. The types and functions in here
// mirror those in the participle package.
type errorInterface interface {
error
Message() string
Position() Position
}
// Error represents an error while lexing.
//
// It complies with the participle.Error interface.
type Error struct {
Msg string
Pos Position
}
var _ errorInterface = &Error{}
// Creates a new Error at the given position.
func errorf(pos Position, format string, args ...interface{}) *Error {
return &Error{Msg: fmt.Sprintf(format, args...), Pos: pos}
}
func (e *Error) Message() string { return e.Msg } // nolint: golint
func (e *Error) Position() Position { return e.Pos } // nolint: golint
// Error formats the error with FormatError.
func (e *Error) Error() string { return formatError(e.Pos, e.Msg) }
// An error in the form "[<filename>:][<line>:<column>:] <message>"
func formatError(pos Position, message string) string {
msg := ""
if pos.Filename != "" {
msg += pos.Filename + ":"
}
if pos.Line != 0 || pos.Column != 0 {
msg += fmt.Sprintf("%d:%d:", pos.Line, pos.Column)
}
if msg != "" {
msg += " " + message
} else {
msg = message
}
return msg
}
golang-github-alecthomas-participle-v2-2.1.4/lexer/internal/ 0000775 0000000 0000000 00000000000 15053003664 0023777 5 ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/lexer/internal/basiclexer.go 0000664 0000000 0000000 00000024575 15053003664 0026464 0 ustar 00root root 0000000 0000000 // Code generated by Participle. DO NOT EDIT.
package internal
import (
"fmt"
"io"
"regexp/syntax"
"strings"
"sync"
"unicode/utf8"
"github.com/alecthomas/participle/v2"
"github.com/alecthomas/participle/v2/lexer"
)
var _ syntax.Op
var _ fmt.State
const _ = utf8.RuneError
var GeneratedBasicBackRefCache sync.Map
var GeneratedBasicLexer lexer.Definition = lexerGeneratedBasicDefinitionImpl{}
type lexerGeneratedBasicDefinitionImpl struct{}
func (lexerGeneratedBasicDefinitionImpl) Symbols() map[string]lexer.TokenType {
return map[string]lexer.TokenType{
"Comment": -7,
"EOF": -1,
"EOL": -6,
"Ident": -4,
"Number": -3,
"Punct": -5,
"String": -2,
"Whitespace": -8,
}
}
func (lexerGeneratedBasicDefinitionImpl) LexString(filename string, s string) (lexer.Lexer, error) {
return &lexerGeneratedBasicImpl{
s: s,
pos: lexer.Position{
Filename: filename,
Line: 1,
Column: 1,
},
states: []lexerGeneratedBasicState{{name: "Root"}},
}, nil
}
func (d lexerGeneratedBasicDefinitionImpl) LexBytes(filename string, b []byte) (lexer.Lexer, error) {
return d.LexString(filename, string(b))
}
func (d lexerGeneratedBasicDefinitionImpl) Lex(filename string, r io.Reader) (lexer.Lexer, error) {
s := &strings.Builder{}
_, err := io.Copy(s, r)
if err != nil {
return nil, err
}
return d.LexString(filename, s.String())
}
type lexerGeneratedBasicState struct {
name string
groups []string
}
type lexerGeneratedBasicImpl struct {
s string
p int
pos lexer.Position
states []lexerGeneratedBasicState
}
func (l *lexerGeneratedBasicImpl) Next() (lexer.Token, error) {
if l.p == len(l.s) {
return lexer.EOFToken(l.pos), nil
}
var (
state = l.states[len(l.states)-1]
groups []int
sym lexer.TokenType
)
switch state.name {
case "Root":
if match := matchGeneratedBasicString(l.s, l.p, l.states[len(l.states)-1].groups); match[1] != 0 {
sym = -2
groups = match[:]
} else if match := matchGeneratedBasicNumber(l.s, l.p, l.states[len(l.states)-1].groups); match[1] != 0 {
sym = -3
groups = match[:]
} else if match := matchGeneratedBasicIdent(l.s, l.p, l.states[len(l.states)-1].groups); match[1] != 0 {
sym = -4
groups = match[:]
} else if match := matchGeneratedBasicPunct(l.s, l.p, l.states[len(l.states)-1].groups); match[1] != 0 {
sym = -5
groups = match[:]
} else if match := matchGeneratedBasicEOL(l.s, l.p, l.states[len(l.states)-1].groups); match[1] != 0 {
sym = -6
groups = match[:]
} else if match := matchGeneratedBasicComment(l.s, l.p, l.states[len(l.states)-1].groups); match[1] != 0 {
sym = -7
groups = match[:]
} else if match := matchGeneratedBasicWhitespace(l.s, l.p, l.states[len(l.states)-1].groups); match[1] != 0 {
sym = -8
groups = match[:]
}
}
if groups == nil {
sample := []rune(l.s[l.p:])
if len(sample) > 16 {
sample = append(sample[:16], []rune("...")...)
}
return lexer.Token{}, participle.Errorf(l.pos, "invalid input text %q", string(sample))
}
pos := l.pos
span := l.s[groups[0]:groups[1]]
l.p = groups[1]
l.pos.Advance(span)
return lexer.Token{
Type: sym,
Value: span,
Pos: pos,
}, nil
}
func (l *lexerGeneratedBasicImpl) sgroups(match []int) []string {
sgroups := make([]string, len(match)/2)
for i := 0; i < len(match)-1; i += 2 {
sgroups[i/2] = l.s[l.p+match[i] : l.p+match[i+1]]
}
return sgroups
}
// "(\\"|[^"])*"
func matchGeneratedBasicString(s string, p int, backrefs []string) (groups [4]int) {
// " (Literal)
l0 := func(s string, p int) int {
if p < len(s) && s[p] == '"' {
return p + 1
}
return -1
}
// \\" (Literal)
l1 := func(s string, p int) int {
if p+2 <= len(s) && s[p:p+2] == "\\\"" {
return p + 2
}
return -1
}
// [^"] (CharClass)
l2 := func(s string, p int) int {
if len(s) <= p {
return -1
}
var (
rn rune
n int
)
if s[p] < utf8.RuneSelf {
rn, n = rune(s[p]), 1
} else {
rn, n = utf8.DecodeRuneInString(s[p:])
}
switch {
case rn >= '\x00' && rn <= '!':
return p + 1
case rn >= '#' && rn <= '\U0010ffff':
return p + n
}
return -1
}
// \\"|[^"] (Alternate)
l3 := func(s string, p int) int {
if np := l1(s, p); np != -1 {
return np
}
if np := l2(s, p); np != -1 {
return np
}
return -1
}
// (\\"|[^"]) (Capture)
l4 := func(s string, p int) int {
np := l3(s, p)
if np != -1 {
groups[2] = p
groups[3] = np
}
return np
}
// (\\"|[^"])* (Star)
l5 := func(s string, p int) int {
for len(s) > p {
if np := l4(s, p); np == -1 {
return p
} else {
p = np
}
}
return p
}
// "(\\"|[^"])*" (Concat)
l6 := func(s string, p int) int {
if p = l0(s, p); p == -1 {
return -1
}
if p = l5(s, p); p == -1 {
return -1
}
if p = l0(s, p); p == -1 {
return -1
}
return p
}
np := l6(s, p)
if np == -1 {
return
}
groups[0] = p
groups[1] = np
return
}
// [\+\-]?([0-9]*\.)?[0-9]+
func matchGeneratedBasicNumber(s string, p int, backrefs []string) (groups [4]int) {
// [\+\-] (CharClass)
l0 := func(s string, p int) int {
if len(s) <= p {
return -1
}
rn := s[p]
if rn == '+' || rn == '-' {
return p + 1
}
return -1
}
// [\+\-]? (Quest)
l1 := func(s string, p int) int {
if np := l0(s, p); np != -1 {
return np
}
return p
}
// [0-9] (CharClass)
l2 := func(s string, p int) int {
if len(s) <= p {
return -1
}
rn := s[p]
switch {
case rn >= '0' && rn <= '9':
return p + 1
}
return -1
}
// [0-9]* (Star)
l3 := func(s string, p int) int {
for len(s) > p {
if np := l2(s, p); np == -1 {
return p
} else {
p = np
}
}
return p
}
// \. (Literal)
l4 := func(s string, p int) int {
if p < len(s) && s[p] == '.' {
return p + 1
}
return -1
}
// [0-9]*\. (Concat)
l5 := func(s string, p int) int {
if p = l3(s, p); p == -1 {
return -1
}
if p = l4(s, p); p == -1 {
return -1
}
return p
}
// ([0-9]*\.) (Capture)
l6 := func(s string, p int) int {
np := l5(s, p)
if np != -1 {
groups[2] = p
groups[3] = np
}
return np
}
// ([0-9]*\.)? (Quest)
l7 := func(s string, p int) int {
if np := l6(s, p); np != -1 {
return np
}
return p
}
// [0-9]+ (Plus)
l8 := func(s string, p int) int {
if p = l2(s, p); p == -1 {
return -1
}
for len(s) > p {
if np := l2(s, p); np == -1 {
return p
} else {
p = np
}
}
return p
}
// [\+\-]?([0-9]*\.)?[0-9]+ (Concat)
l9 := func(s string, p int) int {
if p = l1(s, p); p == -1 {
return -1
}
if p = l7(s, p); p == -1 {
return -1
}
if p = l8(s, p); p == -1 {
return -1
}
return p
}
np := l9(s, p)
if np == -1 {
return
}
groups[0] = p
groups[1] = np
return
}
// [A-Z_a-z][0-9A-Z_a-z]*
func matchGeneratedBasicIdent(s string, p int, backrefs []string) (groups [2]int) {
// [A-Z_a-z] (CharClass)
l0 := func(s string, p int) int {
if len(s) <= p {
return -1
}
rn := s[p]
switch {
case rn >= 'A' && rn <= 'Z':
return p + 1
case rn == '_':
return p + 1
case rn >= 'a' && rn <= 'z':
return p + 1
}
return -1
}
// [0-9A-Z_a-z] (CharClass)
l1 := func(s string, p int) int {
if len(s) <= p {
return -1
}
rn := s[p]
switch {
case rn >= '0' && rn <= '9':
return p + 1
case rn >= 'A' && rn <= 'Z':
return p + 1
case rn == '_':
return p + 1
case rn >= 'a' && rn <= 'z':
return p + 1
}
return -1
}
// [0-9A-Z_a-z]* (Star)
l2 := func(s string, p int) int {
for len(s) > p {
if np := l1(s, p); np == -1 {
return p
} else {
p = np
}
}
return p
}
// [A-Z_a-z][0-9A-Z_a-z]* (Concat)
l3 := func(s string, p int) int {
if p = l0(s, p); p == -1 {
return -1
}
if p = l2(s, p); p == -1 {
return -1
}
return p
}
np := l3(s, p)
if np == -1 {
return
}
groups[0] = p
groups[1] = np
return
}
// [!-/:-@\[-`\{-~]+
func matchGeneratedBasicPunct(s string, p int, backrefs []string) (groups [2]int) {
// [!-/:-@\[-`\{-~] (CharClass)
l0 := func(s string, p int) int {
if len(s) <= p {
return -1
}
rn := s[p]
switch {
case rn >= '!' && rn <= '/':
return p + 1
case rn >= ':' && rn <= '@':
return p + 1
case rn >= '[' && rn <= '`':
return p + 1
case rn >= '{' && rn <= '~':
return p + 1
}
return -1
}
// [!-/:-@\[-`\{-~]+ (Plus)
l1 := func(s string, p int) int {
if p = l0(s, p); p == -1 {
return -1
}
for len(s) > p {
if np := l0(s, p); np == -1 {
return p
} else {
p = np
}
}
return p
}
np := l1(s, p)
if np == -1 {
return
}
groups[0] = p
groups[1] = np
return
}
// \n
func matchGeneratedBasicEOL(s string, p int, backrefs []string) (groups [2]int) {
if p < len(s) && s[p] == '\n' {
groups[0] = p
groups[1] = p + 1
}
return
}
// (?i:REM)[^\n]*(?i:\n)
func matchGeneratedBasicComment(s string, p int, backrefs []string) (groups [2]int) {
// (?i:REM) (Literal)
l0 := func(s string, p int) int {
if p+3 <= len(s) && strings.EqualFold(s[p:p+3], "REM") {
return p + 3
}
return -1
}
// [^\n] (CharClass)
l1 := func(s string, p int) int {
if len(s) <= p {
return -1
}
var (
rn rune
n int
)
if s[p] < utf8.RuneSelf {
rn, n = rune(s[p]), 1
} else {
rn, n = utf8.DecodeRuneInString(s[p:])
}
switch {
case rn >= '\x00' && rn <= '\t':
return p + 1
case rn >= '\v' && rn <= '\U0010ffff':
return p + n
}
return -1
}
// [^\n]* (Star)
l2 := func(s string, p int) int {
for len(s) > p {
if np := l1(s, p); np == -1 {
return p
} else {
p = np
}
}
return p
}
// (?i:\n) (Literal)
l3 := func(s string, p int) int {
if p < len(s) && s[p] == '\n' {
return p + 1
}
return -1
}
// (?i:REM)[^\n]*(?i:\n) (Concat)
l4 := func(s string, p int) int {
if p = l0(s, p); p == -1 {
return -1
}
if p = l2(s, p); p == -1 {
return -1
}
if p = l3(s, p); p == -1 {
return -1
}
return p
}
np := l4(s, p)
if np == -1 {
return
}
groups[0] = p
groups[1] = np
return
}
// [\t ]+
func matchGeneratedBasicWhitespace(s string, p int, backrefs []string) (groups [2]int) {
// [\t ] (CharClass)
l0 := func(s string, p int) int {
if len(s) <= p {
return -1
}
rn := s[p]
if rn == '\t' || rn == ' ' {
return p + 1
}
return -1
}
// [\t ]+ (Plus)
l1 := func(s string, p int) int {
if p = l0(s, p); p == -1 {
return -1
}
for len(s) > p {
if np := l0(s, p); np == -1 {
return p
} else {
p = np
}
}
return p
}
np := l1(s, p)
if np == -1 {
return
}
groups[0] = p
groups[1] = np
return
}
golang-github-alecthomas-participle-v2-2.1.4/lexer/internal/basiclexer.json 0000664 0000000 0000000 00000000776 15053003664 0027025 0 ustar 00root root 0000000 0000000 {
"Root": [
{
"name": "String",
"pattern": "\"(\\\\\"|[^\"])*\""
},
{
"name": "Number",
"pattern": "[-+]?(\\d*\\.)?\\d+"
},
{
"name": "Ident",
"pattern": "[a-zA-Z_]\\w*"
},
{
"name": "Punct",
"pattern": "[!-/:-@[-`{-~]+"
},
{
"name": "EOL",
"pattern": "\\n"
},
{
"name": "Comment",
"pattern": "(?i)rem[^\\n]*\\n"
},
{
"name": "Whitespace",
"pattern": "[ \\t]+"
}
]
} golang-github-alecthomas-participle-v2-2.1.4/lexer/internal/conformance/ 0000775 0000000 0000000 00000000000 15053003664 0026271 5 ustar 00root root 0000000 0000000 golang-github-alecthomas-participle-v2-2.1.4/lexer/internal/conformance/conformance_codegen_test.go 0000664 0000000 0000000 00000000471 15053003664 0033637 0 ustar 00root root 0000000 0000000 //go:build generated
package conformance_test
import (
"testing"
"github.com/alecthomas/participle/v2/lexer/internal/conformance"
)
// This should only be run by TestLexerConformanceGenerated.
func TestLexerConformanceGeneratedInternal(t *testing.T) {
testLexer(t, conformance.GeneratedConformanceLexer)
}
golang-github-alecthomas-participle-v2-2.1.4/lexer/internal/conformance/conformance_test.go 0000664 0000000 0000000 00000016126 15053003664 0032157 0 ustar 00root root 0000000 0000000 package conformance_test
import (
"encoding/json"
"flag"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"github.com/alecthomas/assert/v2"
"github.com/alecthomas/participle/v2/lexer"
)
var conformanceLexer = lexer.MustStateful(lexer.Rules{
"Root": {
{"ExprTest", `EXPRTEST:`, lexer.Push("ExprTest")},
{"LiteralTest", `LITTEST:`, lexer.Push("LiteralTest")},
{"CaseInsensitiveTest", `CITEST:`, lexer.Push("CaseInsensitiveTest")},
// Use this to test \b at very start of the string!
{"WordBoundaryTest", `\bWBTEST:`, lexer.Push("WordBoundaryTest")},
},
"ExprTest": {
{"ExprString", `"`, lexer.Push("ExprString")},
// {"ExprHeredoc", `<<(\w+)`, lexer.Push("ExprHeredoc")},
},
"ExprString": {
{"ExprEscaped", `\\.`, nil},
{"ExprStringEnd", `"`, lexer.Pop()},
{"Expr", `\${`, lexer.Push("Expr")},
{"ExprChar", `[^$"\\]+`, nil},
},
"Expr": {
lexer.Include("ExprTest"),
{`Whitespace`, `\s+`, nil},
{`ExprOper`, `[-+/*%]`, nil},
{"Ident", `\w+`, lexer.Push("ExprReference")},
{"ExprEnd", `}`, lexer.Pop()},
},
"ExprReference": {
{"ExprDot", `\.`, nil},
{"Ident", `\w+`, nil},
lexer.Return(),
},
// "ExprHeredoc": {
// {"ExprHeredocEnd", `\1`, lexer.Pop()},
// lexer.Include("Expr"),
// },
"LiteralTest": {
{`LITOne`, `ONE`, nil},
{`LITKeyword`, `SELECT|FROM|WHERE|LIKE`, nil},
{"Ident", `\w+`, nil},
{"Whitespace", `\s+`, nil},
},
"CaseInsensitiveTest": {
{`ABCWord`, `[aA][bB][cC]`, nil},
{`CIKeyword`, `(?i)(SELECT|from|WHERE|LIKE)`, nil},
{"Ident", `\w+`, nil},
{"Whitespace", `\s+`, nil},
},
"WordBoundaryTest": {
{`WBKeyword`, `\b(?:abc|xyz)\b`, nil},
{`WBGroupKeyword`, `(?:90|0)\b`, nil},
{"Slash", `/`, nil},
{"Ident", `\w+`, nil},
{"Whitespace", `\s+`, nil},
},
})
type token struct {
Type string
Value string
}
func testLexer(t *testing.T, lex lexer.Definition) {
t.Helper()
tests := []struct {
name string
input string
expected []token
}{
{"ExprPush", `EXPRTEST:"${"Hello ${name + "!"}"}"`, []token{
{"ExprString", "\""},
{"Expr", "${"},
{"ExprString", "\""},
{"ExprChar", "Hello "},
{"Expr", "${"},
{"Ident", "name"},
{"Whitespace", " "},
{"ExprOper", "+"},
{"Whitespace", " "},
{"ExprString", "\""},
{"ExprChar", "!"},
{"ExprStringEnd", "\""},
{"ExprEnd", "}"},
{"ExprStringEnd", "\""},
{"ExprEnd", "}"},
{"ExprStringEnd", "\""},
}},
{"ExprReference", `EXPRTEST:"${user.name}"`, []token{
{"ExprString", "\""},
{"Expr", "${"},
{"Ident", "user"},
{"ExprDot", "."},
{"Ident", "name"},
{"ExprEnd", "}"},
{"ExprStringEnd", "\""},
}},
// TODO(alecthomas): Once backreferences are supported, this will work.
// {"Backref", `EXPRTEST:<