dependencies/0000775000000000000000000000000015024302466012206 5ustar rootrootdependencies/pkg/0000775000000000000000000000000015024302472012764 5ustar rootrootdependencies/pkg/mod/0000775000000000000000000000000015024302470013541 5ustar rootrootdependencies/pkg/mod/github.com/0000775000000000000000000000000015024302472015602 5ustar rootrootdependencies/pkg/mod/github.com/jessevdk/0000775000000000000000000000000015024302467017424 5ustar rootrootdependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/0000755000000000000000000000000015024302467021773 5ustar rootrootdependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/help_test.go0000644000000000000000000005351415024302467024321 0ustar rootrootpackage flags import ( "bufio" "bytes" "errors" "fmt" "os" "runtime" "strconv" "strings" "testing" "time" ) type helpOptions struct { Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information" ini-name:"verbose"` Call func(string) `short:"c" description:"Call phone number" ini-name:"call"` PtrSlice []*string `long:"ptrslice" description:"A slice of pointers to string"` EmptyDescription bool `long:"empty-description"` Default string `long:"default" default:"Some\nvalue" description:"Test default value"` DefaultArray []string `long:"default-array" default:"Some value" default:"Other\tvalue" description:"Test default array value"` DefaultMap map[string]string `long:"default-map" default:"some:value" default:"another:value" description:"Testdefault map value"` EnvDefault1 string `long:"env-default1" default:"Some value" env:"ENV_DEFAULT" description:"Test env-default1 value"` EnvDefault2 string `long:"env-default2" env:"ENV_DEFAULT" description:"Test env-default2 value"` OptionWithArgName string `long:"opt-with-arg-name" value-name:"something" description:"Option with named argument"` OptionWithChoices string `long:"opt-with-choices" value-name:"choice" choice:"dog" choice:"cat" description:"Option with choices"` Hidden string `long:"hidden" description:"Hidden option" hidden:"yes"` HiddenOptionWithVeryLongName bool `long:"this-hidden-option-has-a-ridiculously-long-name" hidden:"yes"` OnlyIni string `ini-name:"only-ini" description:"Option only available in ini"` Other struct { StringSlice []string `short:"s" default:"some" default:"value" description:"A slice of strings"` IntMap map[string]int `long:"intmap" default:"a:1" description:"A map from string to int" ini-name:"int-map"` } `group:"Other Options"` HiddenGroup struct { InsideHiddenGroup string `long:"inside-hidden-group" description:"Inside hidden group"` Padder bool `long:"this-option-in-a-hidden-group-has-a-ridiculously-long-name"` } `group:"Hidden group" hidden:"yes"` GroupWithOnlyHiddenOptions struct { SecretFlag bool `long:"secret" description:"Hidden flag in a non-hidden group" hidden:"yes"` } `group:"Non-hidden group with only hidden options"` Group struct { Opt string `long:"opt" description:"This is a subgroup option"` HiddenInsideGroup string `long:"hidden-inside-group" description:"Hidden inside group" hidden:"yes"` NotHiddenInsideGroup string `long:"not-hidden-inside-group" description:"Not hidden inside group" hidden:"false"` Group struct { Opt string `long:"opt" description:"This is a subsubgroup option"` } `group:"Subsubgroup" namespace:"sap"` } `group:"Subgroup" namespace:"sip"` Bommand struct { Hidden bool `long:"hidden" description:"A hidden option" hidden:"yes"` } `command:"bommand" description:"A command with only hidden options"` Command struct { ExtraVerbose []bool `long:"extra-verbose" 
description:"Use for extra verbosity"` } `command:"command" alias:"cm" alias:"cmd" description:"A command"` HiddenCommand struct { ExtraVerbose []bool `long:"extra-verbose" description:"Use for extra verbosity"` } `command:"hidden-command" description:"A hidden command" hidden:"yes"` ParentCommand struct { Opt string `long:"opt" description:"This is a parent command option"` SubCommand struct { Opt string `long:"opt" description:"This is a sub command option"` } `command:"sub" description:"A sub command"` } `command:"parent" description:"A parent command"` Args struct { Filename string `positional-arg-name:"filename" description:"A filename with a long description to trigger line wrapping"` Number int `positional-arg-name:"num" description:"A number"` HiddenInHelp float32 `positional-arg-name:"hidden-in-help" required:"yes"` } `positional-args:"yes"` } func TestHelp(t *testing.T) { oldEnv := EnvSnapshot() defer oldEnv.Restore() os.Setenv("ENV_DEFAULT", "env-def") var opts helpOptions p := NewNamedParser("TestHelp", HelpFlag) p.AddGroup("Application Options", "The application options", &opts) _, err := p.ParseArgs([]string{"--help"}) if err == nil { t.Fatalf("Expected help error") } if e, ok := err.(*Error); !ok { t.Fatalf("Expected flags.Error, but got %T", err) } else { if e.Type != ErrHelp { t.Errorf("Expected flags.ErrHelp type, but got %s", e.Type) } var expected string if runtime.GOOS == "windows" { expected = `Usage: TestHelp [OPTIONS] [filename] [num] hidden-in-help Application Options: /v, /verbose Show verbose debug information /c: Call phone number /ptrslice: A slice of pointers to string /empty-description /default: Test default value (default: "Some\nvalue") /default-array: Test default array value (default: Some value, "Other\tvalue") /default-map: Testdefault map value (default: some:value, another:value) /env-default1: Test env-default1 value (default: Some value) [%ENV_DEFAULT%] /env-default2: Test env-default2 value [%ENV_DEFAULT%] /opt-with-arg-name:something Option with named argument /opt-with-choices:choice[dog|cat] Option with choices Other Options: /s: A slice of strings (default: some, value) /intmap: A map from string to int (default: a:1) Subgroup: /sip.opt: This is a subgroup option /sip.not-hidden-inside-group: Not hidden inside group Subsubgroup: /sip.sap.opt: This is a subsubgroup option Help Options: /? 
Show this help message /h, /help Show this help message Arguments: filename: A filename with a long description to trigger line wrapping num: A number Available commands: bommand A command with only hidden options command A command (aliases: cm, cmd) parent A command with a sub command ` } else { expected = `Usage: TestHelp [OPTIONS] [filename] [num] hidden-in-help Application Options: -v, --verbose Show verbose debug information -c= Call phone number --ptrslice= A slice of pointers to string --empty-description --default= Test default value (default: "Some\nvalue") --default-array= Test default array value (default: Some value, "Other\tvalue") --default-map= Testdefault map value (default: some:value, another:value) --env-default1= Test env-default1 value (default: Some value) [$ENV_DEFAULT] --env-default2= Test env-default2 value [$ENV_DEFAULT] --opt-with-arg-name=something Option with named argument --opt-with-choices=choice[dog|cat] Option with choices Other Options: -s= A slice of strings (default: some, value) --intmap= A map from string to int (default: a:1) Subgroup: --sip.opt= This is a subgroup option --sip.not-hidden-inside-group= Not hidden inside group Subsubgroup: --sip.sap.opt= This is a subsubgroup option Help Options: -h, --help Show this help message Arguments: filename: A filename with a long description to trigger line wrapping num: A number Available commands: bommand A command with only hidden options command A command (aliases: cm, cmd) parent A parent command ` } assertDiff(t, e.Message, expected, "help message") } } func TestMan(t *testing.T) { oldEnv := EnvSnapshot() defer oldEnv.Restore() os.Setenv("ENV_DEFAULT", "env-def") var opts helpOptions p := NewNamedParser("TestMan", HelpFlag) p.ShortDescription = "Test manpage generation" p.LongDescription = "This is a somewhat `longer' description of what this does.\nWith multiple lines." p.AddGroup("Application Options", "The application options", &opts) for _, cmd := range p.Commands() { cmd.LongDescription = fmt.Sprintf("Longer `%s' description", cmd.Name) } var buf bytes.Buffer p.WriteManPage(&buf) got := buf.String() tt := time.Now() source_date_epoch := os.Getenv("SOURCE_DATE_EPOCH") if source_date_epoch != "" { sde, err := strconv.ParseInt(source_date_epoch, 10, 64) if err != nil { panic(fmt.Sprintf("Invalid SOURCE_DATE_EPOCH: %s", err)) } tt = time.Unix(sde, 0) } var envDefaultName string if runtime.GOOS == "windows" { envDefaultName = "%ENV_DEFAULT%" } else { envDefaultName = "$ENV_DEFAULT" } expected := fmt.Sprintf(`.TH TestMan 1 "%s" .SH NAME TestMan \- Test manpage generation .SH SYNOPSIS \fBTestMan\fP [OPTIONS] .SH DESCRIPTION This is a somewhat \fBlonger\fP description of what this does. With multiple lines. 
.SH OPTIONS .SS Application Options The application options .TP \fB\fB\-v\fR, \fB\-\-verbose\fR\fP Show verbose debug information .TP \fB\fB\-c\fR\fP Call phone number .TP \fB\fB\-\-ptrslice\fR\fP A slice of pointers to string .TP \fB\fB\-\-empty-description\fR\fP .TP \fB\fB\-\-default\fR \fP Test default value .TP \fB\fB\-\-default-array\fR \fP Test default array value .TP \fB\fB\-\-default-map\fR \fP Testdefault map value .TP \fB\fB\-\-env-default1\fR \fP Test env-default1 value .TP \fB\fB\-\-env-default2\fR \fP Test env-default2 value .TP \fB\fB\-\-opt-with-arg-name\fR \fIsomething\fR\fP Option with named argument .TP \fB\fB\-\-opt-with-choices\fR \fIchoice\fR\fP Option with choices .SS Other Options .TP \fB\fB\-s\fR \fP A slice of strings .TP \fB\fB\-\-intmap\fR \fP A map from string to int .SS Subgroup .TP \fB\fB\-\-sip.opt\fR\fP This is a subgroup option .TP \fB\fB\-\-sip.not-hidden-inside-group\fR\fP Not hidden inside group .SS Subsubgroup .TP \fB\fB\-\-sip.sap.opt\fR\fP This is a subsubgroup option .SH COMMANDS .SS bommand A command with only hidden options Longer \fBbommand\fP description .SS command A command Longer \fBcommand\fP description \fBUsage\fP: TestMan [OPTIONS] command [command-OPTIONS] .TP \fBAliases\fP: cm, cmd .TP \fB\fB\-\-extra-verbose\fR\fP Use for extra verbosity .SS parent A parent command Longer \fBparent\fP description \fBUsage\fP: TestMan [OPTIONS] parent [parent-OPTIONS] .TP .TP \fB\fB\-\-opt\fR\fP This is a parent command option .SS parent sub A sub command \fBUsage\fP: TestMan [OPTIONS] parent [parent-OPTIONS] sub [sub-OPTIONS] .TP .TP \fB\fB\-\-opt\fR\fP This is a sub command option `, tt.Format("2 January 2006"), envDefaultName) assertDiff(t, got, expected, "man page") } type helpCommandNoOptions struct { Command struct { } `command:"command" description:"A command"` } func TestHelpCommand(t *testing.T) { oldEnv := EnvSnapshot() defer oldEnv.Restore() os.Setenv("ENV_DEFAULT", "env-def") var opts helpCommandNoOptions p := NewNamedParser("TestHelpCommand", HelpFlag) p.AddGroup("Application Options", "The application options", &opts) _, err := p.ParseArgs([]string{"command", "--help"}) if err == nil { t.Fatalf("Expected help error") } if e, ok := err.(*Error); !ok { t.Fatalf("Expected flags.Error, but got %T", err) } else { if e.Type != ErrHelp { t.Errorf("Expected flags.ErrHelp type, but got %s", e.Type) } var expected string if runtime.GOOS == "windows" { expected = `Usage: TestHelpCommand [OPTIONS] command Help Options: /? 
Show this help message /h, /help Show this help message ` } else { expected = `Usage: TestHelpCommand [OPTIONS] command Help Options: -h, --help Show this help message ` } assertDiff(t, e.Message, expected, "help message") } } func TestHiddenCommandNoBuiltinHelp(t *testing.T) { oldEnv := EnvSnapshot() defer oldEnv.Restore() os.Setenv("ENV_DEFAULT", "env-def") // no auto added help group p := NewNamedParser("TestHelpCommand", 0) // and no usage information either p.Usage = "" // add custom help group which is not listed in --help output var help struct { ShowHelp func() error `short:"h" long:"help"` } help.ShowHelp = func() error { return &Error{Type: ErrHelp} } hlpgrp, err := p.AddGroup("Help Options", "", &help) if err != nil { t.Fatalf("Unexpected error: %v", err) } hlpgrp.Hidden = true hlp := p.FindOptionByLongName("help") hlp.Description = "Show this help message" // make sure the --help option is hidden hlp.Hidden = true // add a hidden command var hiddenCmdOpts struct { Foo bool `short:"f" long:"very-long-foo-option" description:"Very long foo description"` Bar bool `short:"b" description:"Option bar"` Positional struct { PositionalFoo string `positional-arg-name:"" description:"positional foo"` } `positional-args:"yes"` } cmdHidden, err := p.Command.AddCommand("hidden", "Hidden command description", "Long hidden command description", &hiddenCmdOpts) if err != nil { t.Fatalf("Unexpected error: %v", err) } // make it hidden cmdHidden.Hidden = true if len(cmdHidden.Options()) != 2 { t.Fatalf("unexpected options count") } // which help we ask for explicitly _, err = p.ParseArgs([]string{"hidden", "--help"}) if err == nil { t.Fatalf("Expected help error") } if e, ok := err.(*Error); !ok { t.Fatalf("Expected flags.Error, but got %T", err) } else { if e.Type != ErrHelp { t.Errorf("Expected flags.ErrHelp type, but got %s", e.Type) } var expected string if runtime.GOOS == "windows" { expected = `Usage: TestHelpCommand hidden [hidden-OPTIONS] [] Long hidden command description [hidden command arguments] : positional foo ` } else { expected = `Usage: TestHelpCommand hidden [hidden-OPTIONS] [] Long hidden command description [hidden command arguments] : positional foo ` } h := &bytes.Buffer{} p.WriteHelp(h) assertDiff(t, h.String(), expected, "help message") } } func TestHelpDefaults(t *testing.T) { var expected string if runtime.GOOS == "windows" { expected = `Usage: TestHelpDefaults [OPTIONS] Application Options: /with-default: With default (default: default-value) /without-default: Without default /with-programmatic-default: With programmatic default (default: default-value) Help Options: /? 
Show this help message /h, /help Show this help message ` } else { expected = `Usage: TestHelpDefaults [OPTIONS] Application Options: --with-default= With default (default: default-value) --without-default= Without default --with-programmatic-default= With programmatic default (default: default-value) Help Options: -h, --help Show this help message ` } tests := []struct { Args []string Output string }{ { Args: []string{"-h"}, Output: expected, }, { Args: []string{"--with-default", "other-value", "--with-programmatic-default", "other-value", "-h"}, Output: expected, }, } for _, test := range tests { var opts struct { WithDefault string `long:"with-default" default:"default-value" description:"With default"` WithoutDefault string `long:"without-default" description:"Without default"` WithProgrammaticDefault string `long:"with-programmatic-default" description:"With programmatic default"` } opts.WithProgrammaticDefault = "default-value" p := NewNamedParser("TestHelpDefaults", HelpFlag) p.AddGroup("Application Options", "The application options", &opts) _, err := p.ParseArgs(test.Args) if err == nil { t.Fatalf("Expected help error") } if e, ok := err.(*Error); !ok { t.Fatalf("Expected flags.Error, but got %T", err) } else { if e.Type != ErrHelp { t.Errorf("Expected flags.ErrHelp type, but got %s", e.Type) } assertDiff(t, e.Message, test.Output, "help message") } } } func TestHelpRestArgs(t *testing.T) { opts := struct { Verbose bool `short:"v"` }{} p := NewNamedParser("TestHelpDefaults", HelpFlag) p.AddGroup("Application Options", "The application options", &opts) retargs, err := p.ParseArgs([]string{"-h", "-v", "rest"}) if err == nil { t.Fatalf("Expected help error") } assertStringArray(t, retargs, []string{"-v", "rest"}) } func TestWrapText(t *testing.T) { s := "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum." got := wrapText(s, 60, " ") expected := `Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.` assertDiff(t, got, expected, "wrapped text") } func TestWrapParagraph(t *testing.T) { s := "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.\n\n" s += "Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.\n\n" s += "Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.\n\n" s += "Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\n" got := wrapText(s, 60, " ") expected := `Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. 
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. ` assertDiff(t, got, expected, "wrapped paragraph") } func TestHelpDefaultMask(t *testing.T) { var tests = []struct { opts interface{} present string }{ { opts: &struct { Value string `short:"v" default:"123" description:"V"` }{}, present: "V (default: 123)\n", }, { opts: &struct { Value string `short:"v" default:"123" default-mask:"abc" description:"V"` }{}, present: "V (default: abc)\n", }, { opts: &struct { Value string `short:"v" default:"123" default-mask:"-" description:"V"` }{}, present: "V\n", }, { opts: &struct { Value string `short:"v" description:"V"` }{Value: "123"}, present: "V (default: 123)\n", }, { opts: &struct { Value string `short:"v" default-mask:"abc" description:"V"` }{Value: "123"}, present: "V (default: abc)\n", }, { opts: &struct { Value string `short:"v" default-mask:"-" description:"V"` }{Value: "123"}, present: "V\n", }, } for _, test := range tests { p := NewParser(test.opts, HelpFlag) _, err := p.ParseArgs([]string{"-h"}) if flagsErr, ok := err.(*Error); ok && flagsErr.Type == ErrHelp { err = nil } if err != nil { t.Fatalf("Unexpected error: %v", err) } h := &bytes.Buffer{} w := bufio.NewWriter(h) p.writeHelpOption(w, p.FindOptionByShortName('v'), p.getAlignmentInfo()) w.Flush() if strings.Index(h.String(), test.present) < 0 { t.Errorf("Not present %q\n%s", test.present, h.String()) } } } func TestWroteHelp(t *testing.T) { type testInfo struct { value error isHelp bool } tests := map[string]testInfo{ "No error": {value: nil, isHelp: false}, "Plain error": {value: errors.New("an error"), isHelp: false}, "ErrUnknown": {value: newError(ErrUnknown, "an error"), isHelp: false}, "ErrHelp": {value: newError(ErrHelp, "an error"), isHelp: true}, } for name, test := range tests { t.Run(name, func(t *testing.T) { res := WroteHelp(test.value) if test.isHelp != res { t.Errorf("Expected %t, got %t", test.isHelp, res) } }) } } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/examples/0000755000000000000000000000000015024302467023611 5ustar rootrootdependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/examples/bash-completion0000644000000000000000000000027115024302467026620 0ustar rootroot_examples() { args=("${COMP_WORDS[@]:1:$COMP_CWORD}") local IFS=$'\n' COMPREPLY=($(GO_FLAGS_COMPLETION=1 ${COMP_WORDS[0]} "${args[@]}")) return 1 } complete -F _examples examples dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/examples/rm.go0000644000000000000000000000067415024302467024565 0ustar rootrootpackage main import ( "fmt" ) type RmCommand struct { Force bool `short:"f" long:"force" description:"Force removal of files"` } var rmCommand RmCommand func (x *RmCommand) Execute(args []string) error { fmt.Printf("Removing (force=%v): %#v\n", x.Force, args) return nil } func init() { parser.AddCommand("rm", "Remove a file", "The rm command removes a file to the repository. 
Use -f to force removal of files.", &rmCommand) } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/examples/main.go0000644000000000000000000000333015024302467025063 0ustar rootrootpackage main import ( "errors" "fmt" "github.com/jessevdk/go-flags" "os" "strconv" "strings" ) type EditorOptions struct { Input flags.Filename `short:"i" long:"input" description:"Input file" default:"-"` Output flags.Filename `short:"o" long:"output" description:"Output file" default:"-"` } type Point struct { X, Y int } func (p *Point) UnmarshalFlag(value string) error { parts := strings.Split(value, ",") if len(parts) != 2 { return errors.New("expected two numbers separated by a ,") } x, err := strconv.ParseInt(parts[0], 10, 32) if err != nil { return err } y, err := strconv.ParseInt(parts[1], 10, 32) if err != nil { return err } p.X = int(x) p.Y = int(y) return nil } func (p Point) MarshalFlag() (string, error) { return fmt.Sprintf("%d,%d", p.X, p.Y), nil } type Options struct { // Example of verbosity with level Verbose []bool `short:"v" long:"verbose" description:"Verbose output"` // Example of optional value User string `short:"u" long:"user" description:"User name" optional:"yes" optional-value:"pancake"` // Example of map with multiple default values Users map[string]string `long:"users" description:"User e-mail map" default:"system:system@example.org" default:"admin:admin@example.org"` // Example of option group Editor EditorOptions `group:"Editor Options"` // Example of custom type Marshal/Unmarshal Point Point `long:"point" description:"A x,y point" default:"1,2"` } var options Options var parser = flags.NewParser(&options, flags.Default) func main() { if _, err := parser.Parse(); err != nil { switch flagsErr := err.(type) { case flags.ErrorType: if flagsErr == flags.ErrHelp { os.Exit(0) } os.Exit(1) default: os.Exit(1) } } } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/examples/add.go0000644000000000000000000000064115024302467024671 0ustar rootrootpackage main import ( "fmt" ) type AddCommand struct { All bool `short:"a" long:"all" description:"Add all files"` } var addCommand AddCommand func (x *AddCommand) Execute(args []string) error { fmt.Printf("Adding (all=%v): %#v\n", x.All, args) return nil } func init() { parser.AddCommand("add", "Add a file", "The add command adds a file to the repository. Use -a to add all files.", &addCommand) } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/command.go0000644000000000000000000002465115024302467023750 0ustar rootrootpackage flags import ( "reflect" "sort" "strconv" "strings" ) // Command represents an application command. Commands can be added to the // parser (which itself is a command) and are selected/executed when its name // is specified on the command line. The Command type embeds a Group and // therefore also carries a set of command specific options. type Command struct { // Embedded, see Group for more information *Group // The name by which the command can be invoked Name string // The active sub command (set by parsing) or nil Active *Command // Whether subcommands are optional SubcommandsOptional bool // Aliases for the command Aliases []string // Whether positional arguments are required ArgsRequired bool // Whether to pass all arguments after the first non option as remaining // command line arguments. This is equivalent to strict POSIX processing. // This is command-local version of PassAfterNonOption Parser flag. It // cannot be turned off when PassAfterNonOption Parser flag is set. 
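// A minimal sketch of how this behaviour is typically enabled from the
// struct tags scanned below by scanSubcommandHandler; the "exec" command
// name and the field names here are only illustrative:
//
//	Exec struct {
//		Args struct {
//			Rest []string `positional-arg-name:"ARG"`
//		} `positional-args:"yes"`
//	} `command:"exec" pass-after-non-option:"yes"`
//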
PassAfterNonOption bool commands []*Command hasBuiltinHelpGroup bool args []*Arg } // Commander is an interface which can be implemented by any command added in // the options. When implemented, the Execute method will be called for the last // specified (sub)command providing the remaining command line arguments. type Commander interface { // Execute will be called for the last active (sub)command. The // args argument contains the remaining command line arguments. The // error that Execute returns will be eventually passed out of the // Parse method of the Parser. Execute(args []string) error } // Usage is an interface which can be implemented to show a custom usage string // in the help message shown for a command. type Usage interface { // Usage is called for commands to allow customized printing of command // usage in the generated help message. Usage() string } type lookup struct { shortNames map[string]*Option longNames map[string]*Option commands map[string]*Command } // AddCommand adds a new command to the parser with the given name and data. The // data needs to be a pointer to a struct from which the fields indicate which // options are in the command. The provided data can implement the Command and // Usage interfaces. func (c *Command) AddCommand(command string, shortDescription string, longDescription string, data interface{}) (*Command, error) { cmd := newCommand(command, shortDescription, longDescription, data) cmd.parent = c if err := cmd.scan(); err != nil { return nil, err } c.commands = append(c.commands, cmd) return cmd, nil } // AddGroup adds a new group to the command with the given name and data. The // data needs to be a pointer to a struct from which the fields indicate which // options are in the group. func (c *Command) AddGroup(shortDescription string, longDescription string, data interface{}) (*Group, error) { group := newGroup(shortDescription, longDescription, data) group.parent = c if err := group.scanType(c.scanSubcommandHandler(group)); err != nil { return nil, err } c.groups = append(c.groups, group) return group, nil } // Commands returns a list of subcommands of this command. func (c *Command) Commands() []*Command { return c.commands } // Find locates the subcommand with the given name and returns it. If no such // command can be found Find will return nil. func (c *Command) Find(name string) *Command { for _, cc := range c.commands { if cc.match(name) { return cc } } return nil } // FindOptionByLongName finds an option that is part of the command, or any of // its parent commands, by matching its long name (including the option // namespace). func (c *Command) FindOptionByLongName(longName string) (option *Option) { for option == nil && c != nil { option = c.Group.FindOptionByLongName(longName) c, _ = c.parent.(*Command) } return option } // FindOptionByShortName finds an option that is part of the command, or any of // its parent commands, by matching its long name (including the option // namespace). func (c *Command) FindOptionByShortName(shortName rune) (option *Option) { for option == nil && c != nil { option = c.Group.FindOptionByShortName(shortName) c, _ = c.parent.(*Command) } return option } // Args returns a list of positional arguments associated with this command. 
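// Positional arguments are collected from a struct field tagged with
// "positional-args" (see scanSubcommandHandler below). A minimal sketch,
// with hypothetical argument names:
//
//	var opts struct {
//		Args struct {
//			Source string `positional-arg-name:"SRC" description:"Source file"`
//			Dest   string `positional-arg-name:"DST" description:"Destination file"`
//		} `positional-args:"yes" required:"yes"`
//	}
//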
func (c *Command) Args() []*Arg { ret := make([]*Arg, len(c.args)) copy(ret, c.args) return ret } func newCommand(name string, shortDescription string, longDescription string, data interface{}) *Command { return &Command{ Group: newGroup(shortDescription, longDescription, data), Name: name, } } func (c *Command) scanSubcommandHandler(parentg *Group) scanHandler { f := func(realval reflect.Value, sfield *reflect.StructField) (bool, error) { mtag := newMultiTag(string(sfield.Tag)) if err := mtag.Parse(); err != nil { return true, err } positional := mtag.Get("positional-args") if len(positional) != 0 { stype := realval.Type() for i := 0; i < stype.NumField(); i++ { field := stype.Field(i) m := newMultiTag((string(field.Tag))) if err := m.Parse(); err != nil { return true, err } name := m.Get("positional-arg-name") if len(name) == 0 { name = field.Name } required := -1 requiredMaximum := -1 sreq := m.Get("required") if sreq != "" { required = 1 rng := strings.SplitN(sreq, "-", 2) if len(rng) > 1 { if preq, err := strconv.ParseInt(rng[0], 10, 32); err == nil { required = int(preq) } if preq, err := strconv.ParseInt(rng[1], 10, 32); err == nil { requiredMaximum = int(preq) } } else { if preq, err := strconv.ParseInt(sreq, 10, 32); err == nil { required = int(preq) } } } arg := &Arg{ Name: name, Description: m.Get("description"), Required: required, RequiredMaximum: requiredMaximum, value: realval.Field(i), tag: m, } c.args = append(c.args, arg) if len(mtag.Get("required")) != 0 { c.ArgsRequired = true } } return true, nil } subcommand := mtag.Get("command") if len(subcommand) != 0 { var ptrval reflect.Value if realval.Kind() == reflect.Ptr { ptrval = realval if ptrval.IsNil() { ptrval.Set(reflect.New(ptrval.Type().Elem())) } } else { ptrval = realval.Addr() } shortDescription := mtag.Get("description") longDescription := mtag.Get("long-description") subcommandsOptional := mtag.Get("subcommands-optional") aliases := mtag.GetMany("alias") passAfterNonOption := mtag.Get("pass-after-non-option") subc, err := c.AddCommand(subcommand, shortDescription, longDescription, ptrval.Interface()) if err != nil { return true, err } subc.Hidden = mtag.Get("hidden") != "" if len(subcommandsOptional) > 0 { subc.SubcommandsOptional = true } if len(aliases) > 0 { subc.Aliases = aliases } if len(passAfterNonOption) > 0 { subc.PassAfterNonOption = true } return true, nil } return parentg.scanSubGroupHandler(realval, sfield) } return f } func (c *Command) scan() error { return c.scanType(c.scanSubcommandHandler(c.Group)) } func (c *Command) eachOption(f func(*Command, *Group, *Option)) { c.eachCommand(func(c *Command) { c.eachGroup(func(g *Group) { for _, option := range g.options { f(c, g, option) } }) }, true) } func (c *Command) eachCommand(f func(*Command), recurse bool) { f(c) for _, cc := range c.commands { if recurse { cc.eachCommand(f, true) } else { f(cc) } } } func (c *Command) eachActiveGroup(f func(cc *Command, g *Group)) { c.eachGroup(func(g *Group) { f(c, g) }) if c.Active != nil { c.Active.eachActiveGroup(f) } } func (c *Command) addHelpGroups(showHelp func() error) { if !c.hasBuiltinHelpGroup { c.addHelpGroup(showHelp) c.hasBuiltinHelpGroup = true } for _, cc := range c.commands { cc.addHelpGroups(showHelp) } } func (c *Command) makeLookup() lookup { ret := lookup{ shortNames: make(map[string]*Option), longNames: make(map[string]*Option), commands: make(map[string]*Command), } parent := c.parent var parents []*Command for parent != nil { if cmd, ok := parent.(*Command); ok { parents = append(parents, 
cmd) parent = cmd.parent } else { parent = nil } } for i := len(parents) - 1; i >= 0; i-- { parents[i].fillLookup(&ret, true) } c.fillLookup(&ret, false) return ret } func (c *Command) fillLookup(ret *lookup, onlyOptions bool) { c.eachGroup(func(g *Group) { for _, option := range g.options { if option.ShortName != 0 { ret.shortNames[string(option.ShortName)] = option } if len(option.LongName) > 0 { ret.longNames[option.LongNameWithNamespace()] = option } } }) if onlyOptions { return } for _, subcommand := range c.commands { ret.commands[subcommand.Name] = subcommand for _, a := range subcommand.Aliases { ret.commands[a] = subcommand } } } func (c *Command) groupByName(name string) *Group { if grp := c.Group.groupByName(name); grp != nil { return grp } for _, subc := range c.commands { prefix := subc.Name + "." if strings.HasPrefix(name, prefix) { if grp := subc.groupByName(name[len(prefix):]); grp != nil { return grp } } else if name == subc.Name { return subc.Group } } return nil } type commandList []*Command func (c commandList) Less(i, j int) bool { return c[i].Name < c[j].Name } func (c commandList) Len() int { return len(c) } func (c commandList) Swap(i, j int) { c[i], c[j] = c[j], c[i] } func (c *Command) sortedVisibleCommands() []*Command { ret := commandList(c.visibleCommands()) sort.Sort(ret) return []*Command(ret) } func (c *Command) visibleCommands() []*Command { ret := make([]*Command, 0, len(c.commands)) for _, cmd := range c.commands { if !cmd.Hidden { ret = append(ret, cmd) } } return ret } func (c *Command) match(name string) bool { if c.Name == name { return true } for _, v := range c.Aliases { if v == name { return true } } return false } func (c *Command) hasHelpOptions() bool { ret := false c.eachGroup(func(g *Group) { if g.isBuiltinHelp { return } for _, opt := range g.options { if opt.showInHelp() { ret = true } } }) return ret } func (c *Command) fillParseState(s *parseState) { s.positional = make([]*Arg, len(c.args)) copy(s.positional, c.args) s.lookup = c.makeLookup() s.command = c } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/unknown_test.go0000644000000000000000000000172215024302467025062 0ustar rootrootpackage flags import ( "testing" ) func TestUnknownFlags(t *testing.T) { var opts = struct { Verbose []bool `short:"v" long:"verbose" description:"Verbose output"` }{} args := []string{ "-f", } p := NewParser(&opts, 0) args, err := p.ParseArgs(args) if err == nil { t.Fatal("Expected error for unknown argument") } } func TestIgnoreUnknownFlags(t *testing.T) { var opts = struct { Verbose []bool `short:"v" long:"verbose" description:"Verbose output"` }{} args := []string{ "hello", "world", "-v", "--foo=bar", "--verbose", "-f", } p := NewParser(&opts, IgnoreUnknown) args, err := p.ParseArgs(args) if err != nil { t.Fatal(err) } exargs := []string{ "hello", "world", "--foo=bar", "-f", } issame := (len(args) == len(exargs)) if issame { for i := 0; i < len(args); i++ { if args[i] != exargs[i] { issame = false break } } } if !issame { t.Fatalf("Expected %v but got %v", exargs, args) } } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/LICENSE0000644000000000000000000000275315024302467023007 0ustar rootrootCopyright (c) 2012 Jesse van den Kieboom. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/help.go0000644000000000000000000002330515024302467023255 0ustar rootroot// Copyright 2012 Jesse van den Kieboom. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package flags import ( "bufio" "bytes" "fmt" "io" "runtime" "strings" "unicode/utf8" ) type alignmentInfo struct { maxLongLen int hasShort bool hasValueName bool terminalColumns int indent bool } const ( paddingBeforeOption = 2 distanceBetweenOptionAndDescription = 2 ) func (a *alignmentInfo) descriptionStart() int { ret := a.maxLongLen + distanceBetweenOptionAndDescription if a.hasShort { ret += 2 } if a.maxLongLen > 0 { ret += 4 } if a.hasValueName { ret += 3 } return ret } func (a *alignmentInfo) updateLen(name string, indent bool) { l := utf8.RuneCountInString(name) if indent { l = l + 4 } if l > a.maxLongLen { a.maxLongLen = l } } func (p *Parser) getAlignmentInfo() alignmentInfo { ret := alignmentInfo{ maxLongLen: 0, hasShort: false, hasValueName: false, terminalColumns: getTerminalColumns(), } if ret.terminalColumns <= 0 { ret.terminalColumns = 80 } var prevcmd *Command p.eachActiveGroup(func(c *Command, grp *Group) { if c != prevcmd { for _, arg := range c.args { ret.updateLen(arg.Name, c != p.Command) } prevcmd = c } if !grp.showInHelp() { return } for _, info := range grp.options { if !info.showInHelp() { continue } if info.ShortName != 0 { ret.hasShort = true } if len(info.ValueName) > 0 { ret.hasValueName = true } l := info.LongNameWithNamespace() + info.ValueName if len(info.Choices) != 0 { l += "[" + strings.Join(info.Choices, "|") + "]" } ret.updateLen(l, c != p.Command) } }) return ret } func wrapText(s string, l int, prefix string) string { var ret string if l < 10 { l = 10 } // Basic text wrapping of s at spaces to fit in l lines := strings.Split(s, "\n") for _, line := range lines { var retline string line = strings.TrimSpace(line) for len(line) > l { // Try to split on space suffix := "" pos := strings.LastIndex(line[:l], " ") if pos < 0 { pos = l - 1 suffix = "-\n" } if len(retline) != 0 { retline += "\n" + prefix } retline += strings.TrimSpace(line[:pos]) + suffix line = strings.TrimSpace(line[pos:]) } if len(line) > 0 { if len(retline) != 0 { retline += "\n" + prefix } retline += line } if len(ret) > 0 { ret += "\n" if len(retline) > 0 { ret += prefix } } ret 
+= retline } return ret } func (p *Parser) writeHelpOption(writer *bufio.Writer, option *Option, info alignmentInfo) { line := &bytes.Buffer{} prefix := paddingBeforeOption if info.indent { prefix += 4 } if option.Hidden { return } line.WriteString(strings.Repeat(" ", prefix)) if option.ShortName != 0 { line.WriteRune(defaultShortOptDelimiter) line.WriteRune(option.ShortName) } else if info.hasShort { line.WriteString(" ") } descstart := info.descriptionStart() + paddingBeforeOption if len(option.LongName) > 0 { if option.ShortName != 0 { line.WriteString(", ") } else if info.hasShort { line.WriteString(" ") } line.WriteString(defaultLongOptDelimiter) line.WriteString(option.LongNameWithNamespace()) } if option.canArgument() { line.WriteRune(defaultNameArgDelimiter) if len(option.ValueName) > 0 { line.WriteString(option.ValueName) } if len(option.Choices) > 0 { line.WriteString("[" + strings.Join(option.Choices, "|") + "]") } } written := line.Len() line.WriteTo(writer) if option.Description != "" { dw := descstart - written writer.WriteString(strings.Repeat(" ", dw)) var def string if len(option.DefaultMask) != 0 { if option.DefaultMask != "-" { def = option.DefaultMask } } else { def = option.defaultLiteral } var envDef string if option.EnvKeyWithNamespace() != "" { var envPrintable string if runtime.GOOS == "windows" { envPrintable = "%" + option.EnvKeyWithNamespace() + "%" } else { envPrintable = "$" + option.EnvKeyWithNamespace() } envDef = fmt.Sprintf(" [%s]", envPrintable) } var desc string if def != "" { desc = fmt.Sprintf("%s (default: %v)%s", option.Description, def, envDef) } else { desc = option.Description + envDef } writer.WriteString(wrapText(desc, info.terminalColumns-descstart, strings.Repeat(" ", descstart))) } writer.WriteString("\n") } func maxCommandLength(s []*Command) int { if len(s) == 0 { return 0 } ret := len(s[0].Name) for _, v := range s[1:] { l := len(v.Name) if l > ret { ret = l } } return ret } // WriteHelp writes a help message containing all the possible options and // their descriptions to the provided writer. Note that the HelpFlag parser // option provides a convenient way to add a -h/--help option group to the // command line parser which will automatically show the help messages using // this method. func (p *Parser) WriteHelp(writer io.Writer) { if writer == nil { return } wr := bufio.NewWriter(writer) aligninfo := p.getAlignmentInfo() cmd := p.Command for cmd.Active != nil { cmd = cmd.Active } if p.Name != "" { wr.WriteString("Usage:\n") wr.WriteString(" ") allcmd := p.Command for allcmd != nil { var usage string if allcmd == p.Command { if len(p.Usage) != 0 { usage = p.Usage } else if p.Options&HelpFlag != 0 { usage = "[OPTIONS]" } } else if us, ok := allcmd.data.(Usage); ok { usage = us.Usage() } else if allcmd.hasHelpOptions() { usage = fmt.Sprintf("[%s-OPTIONS]", allcmd.Name) } if len(usage) != 0 { fmt.Fprintf(wr, " %s %s", allcmd.Name, usage) } else { fmt.Fprintf(wr, " %s", allcmd.Name) } if len(allcmd.args) > 0 { fmt.Fprintf(wr, " ") } for i, arg := range allcmd.args { if i != 0 { fmt.Fprintf(wr, " ") } name := arg.Name if arg.isRemaining() { name = name + "..." 
} if !allcmd.ArgsRequired { if arg.Required > 0 { fmt.Fprintf(wr, "%s", name) } else { fmt.Fprintf(wr, "[%s]", name) } } else { fmt.Fprintf(wr, "%s", name) } } if allcmd.Active == nil && len(allcmd.commands) > 0 { var co, cc string if allcmd.SubcommandsOptional { co, cc = "[", "]" } else { co, cc = "<", ">" } visibleCommands := allcmd.visibleCommands() if len(visibleCommands) > 3 { fmt.Fprintf(wr, " %scommand%s", co, cc) } else { subcommands := allcmd.sortedVisibleCommands() names := make([]string, len(subcommands)) for i, subc := range subcommands { names[i] = subc.Name } fmt.Fprintf(wr, " %s%s%s", co, strings.Join(names, " | "), cc) } } allcmd = allcmd.Active } fmt.Fprintln(wr) if len(cmd.LongDescription) != 0 { fmt.Fprintln(wr) t := wrapText(cmd.LongDescription, aligninfo.terminalColumns, "") fmt.Fprintln(wr, t) } } c := p.Command for c != nil { printcmd := c != p.Command c.eachGroup(func(grp *Group) { first := true // Skip built-in help group for all commands except the top-level // parser if grp.Hidden || (grp.isBuiltinHelp && c != p.Command) { return } for _, info := range grp.options { if !info.showInHelp() { continue } if printcmd { fmt.Fprintf(wr, "\n[%s command options]\n", c.Name) aligninfo.indent = true printcmd = false } if first && cmd.Group != grp { fmt.Fprintln(wr) if aligninfo.indent { wr.WriteString(" ") } fmt.Fprintf(wr, "%s:\n", grp.ShortDescription) first = false } p.writeHelpOption(wr, info, aligninfo) } }) var args []*Arg for _, arg := range c.args { if arg.Description != "" { args = append(args, arg) } } if len(args) > 0 { if c == p.Command { fmt.Fprintf(wr, "\nArguments:\n") } else { fmt.Fprintf(wr, "\n[%s command arguments]\n", c.Name) } descStart := aligninfo.descriptionStart() + paddingBeforeOption for _, arg := range args { argPrefix := strings.Repeat(" ", paddingBeforeOption) argPrefix += arg.Name if len(arg.Description) > 0 { argPrefix += ":" wr.WriteString(argPrefix) // Space between "arg:" and the description start descPadding := strings.Repeat(" ", descStart-len(argPrefix)) // How much space the description gets before wrapping descWidth := aligninfo.terminalColumns - 1 - descStart // Whitespace to which we can indent new description lines descPrefix := strings.Repeat(" ", descStart) wr.WriteString(descPadding) wr.WriteString(wrapText(arg.Description, descWidth, descPrefix)) } else { wr.WriteString(argPrefix) } fmt.Fprintln(wr) } } c = c.Active } scommands := cmd.sortedVisibleCommands() if len(scommands) > 0 { maxnamelen := maxCommandLength(scommands) fmt.Fprintln(wr) fmt.Fprintln(wr, "Available commands:") for _, c := range scommands { fmt.Fprintf(wr, " %s", c.Name) if len(c.ShortDescription) > 0 { pad := strings.Repeat(" ", maxnamelen-len(c.Name)) fmt.Fprintf(wr, "%s %s", pad, c.ShortDescription) if len(c.Aliases) > 0 { fmt.Fprintf(wr, " (aliases: %s)", strings.Join(c.Aliases, ", ")) } } fmt.Fprintln(wr) } } wr.Flush() } // WroteHelp is a helper to test the error from ParseArgs() to // determine if the help message was written. It is safe to // call without first checking that error is nil. 
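// A minimal usage sketch from a caller's perspective (assumes this package
// is imported as "flags" and a *flags.Parser named parser already exists):
//
//	if _, err := parser.Parse(); err != nil {
//		if flags.WroteHelp(err) {
//			os.Exit(0) // help was requested and has been printed
//		}
//		os.Exit(1)
//	}
//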
func WroteHelp(err error) bool { if err == nil { // No error return false } flagError, ok := err.(*Error) if !ok { // Not a go-flag error return false } if flagError.Type != ErrHelp { // Did not print the help message return false } return true } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/optstyle_windows.go0000644000000000000000000000563115024302467025764 0ustar rootroot//go:build !forceposix // +build !forceposix package flags import ( "strings" ) // Windows uses a front slash for both short and long options. Also it uses // a colon for name/argument delimter. const ( defaultShortOptDelimiter = '/' defaultLongOptDelimiter = "/" defaultNameArgDelimiter = ':' ) func argumentStartsOption(arg string) bool { return len(arg) > 0 && (arg[0] == '-' || arg[0] == '/') } func argumentIsOption(arg string) bool { // Windows-style options allow front slash for the option // delimiter. if len(arg) > 1 && arg[0] == '/' { return true } if len(arg) > 1 && arg[0] == '-' && arg[1] != '-' { return true } if len(arg) > 2 && arg[0] == '-' && arg[1] == '-' && arg[2] != '-' { return true } return false } // stripOptionPrefix returns the option without the prefix and whether or // not the option is a long option or not. func stripOptionPrefix(optname string) (prefix string, name string, islong bool) { // Determine if the argument is a long option or not. Windows // typically supports both long and short options with a single // front slash as the option delimiter, so handle this situation // nicely. possplit := 0 if strings.HasPrefix(optname, "--") { possplit = 2 islong = true } else if strings.HasPrefix(optname, "-") { possplit = 1 islong = false } else if strings.HasPrefix(optname, "/") { possplit = 1 islong = len(optname) > 2 } return optname[:possplit], optname[possplit:], islong } // splitOption attempts to split the passed option into a name and an argument. // When there is no argument specified, nil will be returned for it. func splitOption(prefix string, option string, islong bool) (string, string, *string) { if len(option) == 0 { return option, "", nil } // Windows typically uses a colon for the option name and argument // delimiter while POSIX typically uses an equals. Support both styles, // but don't allow the two to be mixed. That is to say /foo:bar and // --foo=bar are acceptable, but /foo=bar and --foo:bar are not. var pos int var sp string if prefix == "/" { sp = ":" pos = strings.Index(option, sp) } else if len(prefix) > 0 { sp = "=" pos = strings.Index(option, sp) } if (islong && pos >= 0) || (!islong && pos == 1) { rest := option[pos+1:] return option[:pos], sp, &rest } return option, "", nil } // addHelpGroup adds a new group that contains default help parameters. func (c *Command) addHelpGroup(showHelp func() error) *Group { // Windows CLI applications typically use /? for help, so make both // that available as well as the POSIX style h and help. var help struct { ShowHelpWindows func() error `short:"?" description:"Show this help message"` ShowHelpPosix func() error `short:"h" long:"help" description:"Show this help message"` } help.ShowHelpWindows = showHelp help.ShowHelpPosix = showHelp ret, _ := c.AddGroup("Help Options", "", &help) ret.isBuiltinHelp = true return ret } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/convert.go0000644000000000000000000001647415024302467024016 0ustar rootroot// Copyright 2012 Jesse van den Kieboom. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
package flags import ( "fmt" "reflect" "strconv" "strings" "time" ) // Marshaler is the interface implemented by types that can marshal themselves // to a string representation of the flag. type Marshaler interface { // MarshalFlag marshals a flag value to its string representation. MarshalFlag() (string, error) } // Unmarshaler is the interface implemented by types that can unmarshal a flag // argument to themselves. The provided value is directly passed from the // command line. type Unmarshaler interface { // UnmarshalFlag unmarshals a string value representation to the flag // value (which therefore needs to be a pointer receiver). UnmarshalFlag(value string) error } // ValueValidator is the interface implemented by types that can validate a // flag argument themselves. The provided value is directly passed from the // command line. type ValueValidator interface { // IsValidValue returns an error if the provided string value is valid for // the flag. IsValidValue(value string) error } func getBase(options multiTag, base int) (int, error) { sbase := options.Get("base") var err error var ivbase int64 if sbase != "" { ivbase, err = strconv.ParseInt(sbase, 10, 32) base = int(ivbase) } return base, err } func convertMarshal(val reflect.Value) (bool, string, error) { // Check first for the Marshaler interface if val.IsValid() && val.Type().NumMethod() > 0 && val.CanInterface() { if marshaler, ok := val.Interface().(Marshaler); ok { ret, err := marshaler.MarshalFlag() return true, ret, err } } return false, "", nil } func convertToString(val reflect.Value, options multiTag) (string, error) { if ok, ret, err := convertMarshal(val); ok { return ret, err } if !val.IsValid() { return "", nil } tp := val.Type() // Support for time.Duration if tp == reflect.TypeOf((*time.Duration)(nil)).Elem() { stringer := val.Interface().(fmt.Stringer) return stringer.String(), nil } switch tp.Kind() { case reflect.String: return val.String(), nil case reflect.Bool: if val.Bool() { return "true", nil } return "false", nil case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: base, err := getBase(options, 10) if err != nil { return "", err } return strconv.FormatInt(val.Int(), base), nil case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: base, err := getBase(options, 10) if err != nil { return "", err } return strconv.FormatUint(val.Uint(), base), nil case reflect.Float32, reflect.Float64: return strconv.FormatFloat(val.Float(), 'g', -1, tp.Bits()), nil case reflect.Slice: if val.Len() == 0 { return "", nil } ret := "[" for i := 0; i < val.Len(); i++ { if i != 0 { ret += ", " } item, err := convertToString(val.Index(i), options) if err != nil { return "", err } ret += item } return ret + "]", nil case reflect.Map: ret := "{" for i, key := range val.MapKeys() { if i != 0 { ret += ", " } keyitem, err := convertToString(key, options) if err != nil { return "", err } item, err := convertToString(val.MapIndex(key), options) if err != nil { return "", err } ret += keyitem + ":" + item } return ret + "}", nil case reflect.Ptr: return convertToString(reflect.Indirect(val), options) case reflect.Interface: if !val.IsNil() { return convertToString(val.Elem(), options) } } return "", nil } func convertUnmarshal(val string, retval reflect.Value) (bool, error) { if retval.Type().NumMethod() > 0 && retval.CanInterface() { if unmarshaler, ok := retval.Interface().(Unmarshaler); ok { if retval.IsNil() { retval.Set(reflect.New(retval.Type().Elem())) // Re-assign from the new 
value unmarshaler = retval.Interface().(Unmarshaler) } return true, unmarshaler.UnmarshalFlag(val) } } if retval.Type().Kind() != reflect.Ptr && retval.CanAddr() { return convertUnmarshal(val, retval.Addr()) } if retval.Type().Kind() == reflect.Interface && !retval.IsNil() { return convertUnmarshal(val, retval.Elem()) } return false, nil } func convert(val string, retval reflect.Value, options multiTag) error { if ok, err := convertUnmarshal(val, retval); ok { return err } tp := retval.Type() // Support for time.Duration if tp == reflect.TypeOf((*time.Duration)(nil)).Elem() { parsed, err := time.ParseDuration(val) if err != nil { return err } retval.SetInt(int64(parsed)) return nil } switch tp.Kind() { case reflect.String: retval.SetString(val) case reflect.Bool: if val == "" { retval.SetBool(true) } else { b, err := strconv.ParseBool(val) if err != nil { return err } retval.SetBool(b) } case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: base, err := getBase(options, 0) if err != nil { return err } parsed, err := strconv.ParseInt(val, base, tp.Bits()) if err != nil { return err } retval.SetInt(parsed) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: base, err := getBase(options, 0) if err != nil { return err } parsed, err := strconv.ParseUint(val, base, tp.Bits()) if err != nil { return err } retval.SetUint(parsed) case reflect.Float32, reflect.Float64: parsed, err := strconv.ParseFloat(val, tp.Bits()) if err != nil { return err } retval.SetFloat(parsed) case reflect.Slice: elemtp := tp.Elem() elemvalptr := reflect.New(elemtp) elemval := reflect.Indirect(elemvalptr) if err := convert(val, elemval, options); err != nil { return err } retval.Set(reflect.Append(retval, elemval)) case reflect.Map: keyValueDelimiter := options.Get("key-value-delimiter") if keyValueDelimiter == "" { keyValueDelimiter = ":" } parts := strings.SplitN(val, keyValueDelimiter, 2) key := parts[0] var value string if len(parts) == 2 { value = parts[1] } keytp := tp.Key() keyval := reflect.New(keytp) if err := convert(key, keyval, options); err != nil { return err } valuetp := tp.Elem() valueval := reflect.New(valuetp) if err := convert(value, valueval, options); err != nil { return err } if retval.IsNil() { retval.Set(reflect.MakeMap(tp)) } retval.SetMapIndex(reflect.Indirect(keyval), reflect.Indirect(valueval)) case reflect.Ptr: if retval.IsNil() { retval.Set(reflect.New(retval.Type().Elem())) } return convert(val, reflect.Indirect(retval), options) case reflect.Interface: if !retval.IsNil() { return convert(val, retval.Elem(), options) } } return nil } func isPrint(s string) bool { for _, c := range s { if !strconv.IsPrint(c) { return false } } return true } func quoteIfNeeded(s string) string { if !isPrint(s) { return strconv.Quote(s) } return s } func quoteIfNeededV(s []string) []string { ret := make([]string, len(s)) for i, v := range s { ret[i] = quoteIfNeeded(v) } return ret } func quoteV(s []string) []string { ret := make([]string, len(s)) for i, v := range s { ret[i] = strconv.Quote(v) } return ret } func unquoteIfPossible(s string) (string, error) { if len(s) == 0 || s[0] != '"' { return s, nil } return strconv.Unquote(s) } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/pointer_test.go0000644000000000000000000000617015024302467025045 0ustar rootrootpackage flags import ( "testing" ) func TestPointerBool(t *testing.T) { var opts = struct { Value *bool `short:"v"` }{} ret := assertParseSuccess(t, &opts, "-v") assertStringArray(t, ret, 
[]string{}) if !*opts.Value { t.Errorf("Expected Value to be true") } } func TestPointerString(t *testing.T) { var opts = struct { Value *string `short:"v"` }{} ret := assertParseSuccess(t, &opts, "-v", "value") assertStringArray(t, ret, []string{}) assertString(t, *opts.Value, "value") } func TestPointerSlice(t *testing.T) { var opts = struct { Value *[]string `short:"v"` }{} ret := assertParseSuccess(t, &opts, "-v", "value1", "-v", "value2") assertStringArray(t, ret, []string{}) assertStringArray(t, *opts.Value, []string{"value1", "value2"}) } func TestPointerMap(t *testing.T) { var opts = struct { Value *map[string]int `short:"v"` }{} ret := assertParseSuccess(t, &opts, "-v", "k1:2", "-v", "k2:-5") assertStringArray(t, ret, []string{}) if v, ok := (*opts.Value)["k1"]; !ok { t.Errorf("Expected key \"k1\" to exist") } else if v != 2 { t.Errorf("Expected \"k1\" to be 2, but got %#v", v) } if v, ok := (*opts.Value)["k2"]; !ok { t.Errorf("Expected key \"k2\" to exist") } else if v != -5 { t.Errorf("Expected \"k2\" to be -5, but got %#v", v) } } type marshalledString string func (m *marshalledString) UnmarshalFlag(value string) error { *m = marshalledString(value) return nil } func (m marshalledString) MarshalFlag() (string, error) { return string(m), nil } func TestPointerStringMarshalled(t *testing.T) { var opts = struct { Value *marshalledString `short:"v"` }{} ret := assertParseSuccess(t, &opts, "-v", "value") assertStringArray(t, ret, []string{}) if opts.Value == nil { t.Error("Expected value not to be nil") return } assertString(t, string(*opts.Value), "value") } type marshalledStruct struct { Value string } func (m *marshalledStruct) UnmarshalFlag(value string) error { m.Value = value return nil } func (m marshalledStruct) MarshalFlag() (string, error) { return m.Value, nil } func TestPointerStructMarshalled(t *testing.T) { var opts = struct { Value *marshalledStruct `short:"v"` }{} ret := assertParseSuccess(t, &opts, "-v", "value") assertStringArray(t, ret, []string{}) if opts.Value == nil { t.Error("Expected value not to be nil") return } assertString(t, opts.Value.Value, "value") } type PointerGroup struct { Value bool `short:"v"` } func TestPointerGroup(t *testing.T) { var opts = struct { Group *PointerGroup `group:"Group Options"` }{} ret := assertParseSuccess(t, &opts, "-v") assertStringArray(t, ret, []string{}) if !opts.Group.Value { t.Errorf("Expected Group.Value to be true") } } func TestDoNotChangeNonTaggedFields(t *testing.T) { var opts struct { A struct { Pointer *int } B *struct { Pointer *int } } ret := assertParseSuccess(t, &opts) assertStringArray(t, ret, []string{}) if opts.A.Pointer != nil { t.Error("Expected A.Pointer to be nil") } if opts.B != nil { t.Error("Expected B to be nil") } if opts.B != nil && opts.B.Pointer != nil { t.Error("Expected B.Pointer to be nil") } } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/go.mod0000644000000000000000000000011715024302467023100 0ustar rootrootmodule github.com/jessevdk/go-flags go 1.20 require golang.org/x/sys v0.21.0 dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/closest.go0000644000000000000000000000170315024302467023777 0ustar rootrootpackage flags func levenshtein(s string, t string) int { if len(s) == 0 { return len(t) } if len(t) == 0 { return len(s) } dists := make([][]int, len(s)+1) for i := range dists { dists[i] = make([]int, len(t)+1) dists[i][0] = i } for j := range t { dists[0][j] = j } for i, sc := range s { for j, tc := range t { if sc == tc { dists[i+1][j+1] = dists[i][j] } else { 
dists[i+1][j+1] = dists[i][j] + 1 if dists[i+1][j] < dists[i+1][j+1] { dists[i+1][j+1] = dists[i+1][j] + 1 } if dists[i][j+1] < dists[i+1][j+1] { dists[i+1][j+1] = dists[i][j+1] + 1 } } } } return dists[len(s)][len(t)] } func closestChoice(cmd string, choices []string) (string, int) { if len(choices) == 0 { return "", 0 } mincmd := -1 mindist := -1 for i, c := range choices { l := levenshtein(cmd, c) if mincmd < 0 || l < mindist { mindist = l mincmd = i } } return choices[mincmd], mindist } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/ini_test.go0000644000000000000000000005475715024302467024162 0ustar rootrootpackage flags import ( "bytes" "fmt" "io/ioutil" "os" "reflect" "strings" "testing" ) func TestWriteIni(t *testing.T) { oldEnv := EnvSnapshot() defer oldEnv.Restore() os.Setenv("ENV_DEFAULT", "env-def") var tests = []struct { args []string options IniOptions expected string }{ { []string{"-vv", "--intmap=a:2", "--intmap", "b:3", "filename", "0", "3.14", "command"}, IniDefault, `[Application Options] ; Show verbose debug information verbose = true verbose = true ; Test env-default1 value EnvDefault1 = env-def ; Test env-default2 value EnvDefault2 = env-def [Other Options] ; A map from string to int int-map = a:2 int-map = b:3 `, }, { []string{"-vv", "--intmap=a:2", "--intmap", "b:3", "filename", "0", "3.14", "command"}, IniDefault | IniIncludeDefaults, `[Application Options] ; Show verbose debug information verbose = true verbose = true ; A slice of pointers to string ; PtrSlice = EmptyDescription = false ; Test default value Default = "Some\nvalue" ; Test default array value DefaultArray = Some value DefaultArray = "Other\tvalue" ; Testdefault map value DefaultMap = another:value DefaultMap = some:value ; Test env-default1 value EnvDefault1 = env-def ; Test env-default2 value EnvDefault2 = env-def ; Option with named argument OptionWithArgName = ; Option with choices OptionWithChoices = ; Option only available in ini only-ini = [Other Options] ; A slice of strings StringSlice = some StringSlice = value ; A map from string to int int-map = a:2 int-map = b:3 [Subgroup] ; This is a subgroup option Opt = ; Not hidden inside group NotHiddenInsideGroup = [Subsubgroup] ; This is a subsubgroup option Opt = [command] ; Use for extra verbosity ; ExtraVerbose = [parent] ; This is a parent command option Opt = [parent.sub] ; This is a sub command option Opt = `, }, { []string{"filename", "0", "3.14", "command"}, IniDefault | IniIncludeDefaults | IniCommentDefaults, `[Application Options] ; Show verbose debug information ; verbose = ; A slice of pointers to string ; PtrSlice = ; EmptyDescription = false ; Test default value ; Default = "Some\nvalue" ; Test default array value ; DefaultArray = Some value ; DefaultArray = "Other\tvalue" ; Testdefault map value ; DefaultMap = another:value ; DefaultMap = some:value ; Test env-default1 value EnvDefault1 = env-def ; Test env-default2 value EnvDefault2 = env-def ; Option with named argument ; OptionWithArgName = ; Option with choices ; OptionWithChoices = ; Option only available in ini ; only-ini = [Other Options] ; A slice of strings ; StringSlice = some ; StringSlice = value ; A map from string to int ; int-map = a:1 [Subgroup] ; This is a subgroup option ; Opt = ; Not hidden inside group ; NotHiddenInsideGroup = [Subsubgroup] ; This is a subsubgroup option ; Opt = [command] ; Use for extra verbosity ; ExtraVerbose = [parent] ; This is a parent command option ; Opt = [parent.sub] ; This is a sub command option ; Opt = `, }, { 
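// Options given explicitly on the command line are written out uncommented,
// while untouched options stay commented because of IniCommentDefaults.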
[]string{"--default=New value", "--default-array=New value", "--default-map=new:value", "filename", "0", "3.14", "command"}, IniDefault | IniIncludeDefaults | IniCommentDefaults, `[Application Options] ; Show verbose debug information ; verbose = ; A slice of pointers to string ; PtrSlice = ; EmptyDescription = false ; Test default value Default = New value ; Test default array value DefaultArray = New value ; Testdefault map value DefaultMap = new:value ; Test env-default1 value EnvDefault1 = env-def ; Test env-default2 value EnvDefault2 = env-def ; Option with named argument ; OptionWithArgName = ; Option with choices ; OptionWithChoices = ; Option only available in ini ; only-ini = [Other Options] ; A slice of strings ; StringSlice = some ; StringSlice = value ; A map from string to int ; int-map = a:1 [Subgroup] ; This is a subgroup option ; Opt = ; Not hidden inside group ; NotHiddenInsideGroup = [Subsubgroup] ; This is a subsubgroup option ; Opt = [command] ; Use for extra verbosity ; ExtraVerbose = [parent] ; This is a parent command option ; Opt = [parent.sub] ; This is a sub command option ; Opt = `, }, { []string{"-vv", "filename", "0", "3.14", "parent", "--opt=p", "sub", "--opt=s"}, IniDefault, `[Application Options] ; Show verbose debug information verbose = true verbose = true ; Test env-default1 value EnvDefault1 = env-def ; Test env-default2 value EnvDefault2 = env-def [parent] ; This is a parent command option Opt = p [parent.sub] ; This is a sub command option Opt = s `, }, } for _, test := range tests { var opts helpOptions p := NewNamedParser("TestIni", Default) p.AddGroup("Application Options", "The application options", &opts) _, err := p.ParseArgs(test.args) if err != nil { t.Fatalf("Unexpected error: %v", err) } inip := NewIniParser(p) var b bytes.Buffer inip.Write(&b, test.options) got := b.String() expected := test.expected msg := fmt.Sprintf("with arguments %+v and ini options %b", test.args, test.options) assertDiff(t, got, expected, msg) } } func TestReadIni_flagEquivalent(t *testing.T) { type options struct { Opt1 bool `long:"opt1"` Group1 struct { Opt2 bool `long:"opt2"` } `group:"group1"` Group2 struct { Opt3 bool `long:"opt3"` } `group:"group2" namespace:"ns1"` Cmd1 struct { Opt4 bool `long:"opt4"` Opt5 bool `long:"foo.opt5"` Group1 struct { Opt6 bool `long:"opt6"` Opt7 bool `long:"foo.opt7"` } `group:"group1"` Group2 struct { Opt8 bool `long:"opt8"` } `group:"group2" namespace:"ns1"` } `command:"cmd1"` } a := ` opt1=true [group1] opt2=true [group2] ns1.opt3=true [cmd1] opt4=true foo.opt5=true [cmd1.group1] opt6=true foo.opt7=true [cmd1.group2] ns1.opt8=true ` b := ` opt1=true opt2=true ns1.opt3=true [cmd1] opt4=true foo.opt5=true opt6=true foo.opt7=true ns1.opt8=true ` parse := func(readIni string) (opts options, writeIni string) { p := NewNamedParser("TestIni", Default) p.AddGroup("Application Options", "The application options", &opts) inip := NewIniParser(p) err := inip.Parse(strings.NewReader(readIni)) if err != nil { t.Fatalf("Unexpected error: %s\n\nFile:\n%s", err, readIni) } var b bytes.Buffer inip.Write(&b, Default) return opts, b.String() } aOpt, aIni := parse(a) bOpt, bIni := parse(b) assertDiff(t, aIni, bIni, "") if !reflect.DeepEqual(aOpt, bOpt) { t.Errorf("not equal") } } func TestReadIni(t *testing.T) { var opts helpOptions p := NewNamedParser("TestIni", Default) p.AddGroup("Application Options", "The application options", &opts) inip := NewIniParser(p) inic := ` ; Show verbose debug information verbose = true verbose = true DefaultMap = 
another:"value\n1" DefaultMap = some:value 2 [Application Options] ; A slice of pointers to string ; PtrSlice = ; Test default value Default = "New\nvalue" ; Test env-default1 value EnvDefault1 = New value [Other Options] # A slice of strings StringSlice = "some\nvalue" StringSlice = another value ; A map from string to int int-map = a:2 int-map = b:3 ` b := strings.NewReader(inic) err := inip.Parse(b) if err != nil { t.Fatalf("Unexpected error: %s", err) } assertBoolArray(t, opts.Verbose, []bool{true, true}) if v := map[string]string{"another": "value\n1", "some": "value 2"}; !reflect.DeepEqual(opts.DefaultMap, v) { t.Fatalf("Expected %#v for DefaultMap but got %#v", v, opts.DefaultMap) } assertString(t, opts.Default, "New\nvalue") assertString(t, opts.EnvDefault1, "New value") assertStringArray(t, opts.Other.StringSlice, []string{"some\nvalue", "another value"}) if v, ok := opts.Other.IntMap["a"]; !ok { t.Errorf("Expected \"a\" in Other.IntMap") } else if v != 2 { t.Errorf("Expected Other.IntMap[\"a\"] = 2, but got %v", v) } if v, ok := opts.Other.IntMap["b"]; !ok { t.Errorf("Expected \"b\" in Other.IntMap") } else if v != 3 { t.Errorf("Expected Other.IntMap[\"b\"] = 3, but got %v", v) } } func TestReadAndWriteIni(t *testing.T) { var tests = []struct { options IniOptions read string write string }{ { IniIncludeComments, `[Application Options] ; Show verbose debug information verbose = true verbose = true ; Test default value Default = "quote me" ; Test default array value DefaultArray = 1 DefaultArray = "2" DefaultArray = 3 ; Testdefault map value ; DefaultMap = ; Test env-default1 value EnvDefault1 = env-def ; Test env-default2 value EnvDefault2 = env-def [Other Options] ; A slice of strings ; StringSlice = ; A map from string to int int-map = a:2 int-map = b:"3" `, `[Application Options] ; Show verbose debug information verbose = true verbose = true ; Test default value Default = "quote me" ; Test default array value DefaultArray = 1 DefaultArray = 2 DefaultArray = 3 ; Testdefault map value ; DefaultMap = ; Test env-default1 value EnvDefault1 = env-def ; Test env-default2 value EnvDefault2 = env-def [Other Options] ; A slice of strings ; StringSlice = ; A map from string to int int-map = a:2 int-map = b:3 `, }, { IniIncludeComments, `[Application Options] ; Show verbose debug information verbose = true verbose = true ; Test default value Default = "quote me" ; Test default array value DefaultArray = "1" DefaultArray = "2" DefaultArray = "3" ; Testdefault map value ; DefaultMap = ; Test env-default1 value EnvDefault1 = env-def ; Test env-default2 value EnvDefault2 = env-def [Other Options] ; A slice of strings ; StringSlice = ; A map from string to int int-map = a:"2" int-map = b:"3" `, `[Application Options] ; Show verbose debug information verbose = true verbose = true ; Test default value Default = "quote me" ; Test default array value DefaultArray = "1" DefaultArray = "2" DefaultArray = "3" ; Testdefault map value ; DefaultMap = ; Test env-default1 value EnvDefault1 = env-def ; Test env-default2 value EnvDefault2 = env-def [Other Options] ; A slice of strings ; StringSlice = ; A map from string to int int-map = a:"2" int-map = b:"3" `, }, } for _, test := range tests { var opts helpOptions p := NewNamedParser("TestIni", Default) p.AddGroup("Application Options", "The application options", &opts) inip := NewIniParser(p) read := strings.NewReader(test.read) err := inip.Parse(read) if err != nil { t.Fatalf("Unexpected error: %s", err) } var write bytes.Buffer inip.Write(&write, 
test.options) got := write.String() msg := fmt.Sprintf("with ini options %b", test.options) assertDiff(t, got, test.write, msg) } } func TestReadIniWrongQuoting(t *testing.T) { var tests = []struct { iniFile string lineNumber uint }{ { iniFile: `Default = "New\nvalue`, lineNumber: 1, }, { iniFile: `StringSlice = "New\nvalue`, lineNumber: 1, }, { iniFile: `StringSlice = "New\nvalue" StringSlice = "Second\nvalue`, lineNumber: 2, }, { iniFile: `DefaultMap = some:"value`, lineNumber: 1, }, { iniFile: `DefaultMap = some:value DefaultMap = another:"value`, lineNumber: 2, }, } for _, test := range tests { var opts helpOptions p := NewNamedParser("TestIni", Default) p.AddGroup("Application Options", "The application options", &opts) inip := NewIniParser(p) inic := test.iniFile b := strings.NewReader(inic) err := inip.Parse(b) if err == nil { t.Fatalf("Expect error") } iniError := err.(*IniError) if iniError.LineNumber != test.lineNumber { t.Fatalf("Expect error on line %d", test.lineNumber) } } } func TestIniCommands(t *testing.T) { var opts struct { Value string `short:"v" long:"value"` Add struct { Name int `short:"n" long:"name" ini-name:"AliasName"` Other struct { O string `short:"o" long:"other"` } `group:"Other Options"` } `command:"add"` } p := NewNamedParser("TestIni", Default) p.AddGroup("Application Options", "The application options", &opts) inip := NewIniParser(p) inic := `[Application Options] value = some value [add] AliasName = 5 [add.Other Options] other = subgroup ` b := strings.NewReader(inic) err := inip.Parse(b) if err != nil { t.Fatalf("Unexpected error: %s", err) } assertString(t, opts.Value, "some value") if opts.Add.Name != 5 { t.Errorf("Expected opts.Add.Name to be 5, but got %v", opts.Add.Name) } assertString(t, opts.Add.Other.O, "subgroup") // Test writing it back buf := &bytes.Buffer{} inip.Write(buf, IniDefault) assertDiff(t, buf.String(), inic, "ini contents") } func TestIniNoIni(t *testing.T) { var opts struct { NoValue string `short:"n" long:"novalue" no-ini:"yes"` Value string `short:"v" long:"value"` } p := NewNamedParser("TestIni", Default) p.AddGroup("Application Options", "The application options", &opts) inip := NewIniParser(p) // read INI inic := `[Application Options] novalue = some value value = some other value ` b := strings.NewReader(inic) err := inip.Parse(b) if err == nil { t.Fatalf("Expected error") } iniError := err.(*IniError) if v := uint(2); iniError.LineNumber != v { t.Errorf("Expected opts.Add.Name to be %d, but got %d", v, iniError.LineNumber) } if v := "unknown option: novalue"; iniError.Message != v { t.Errorf("Expected opts.Add.Name to be %s, but got %s", v, iniError.Message) } // write INI opts.NoValue = "some value" opts.Value = "some other value" file, err := ioutil.TempFile("", "") if err != nil { t.Fatalf("Cannot create temporary file: %s", err) } defer os.Remove(file.Name()) err = inip.WriteFile(file.Name(), IniIncludeDefaults) if err != nil { t.Fatalf("Could not write ini file: %s", err) } found, err := ioutil.ReadFile(file.Name()) if err != nil { t.Fatalf("Could not read written ini file: %s", err) } expected := "[Application Options]\nValue = some other value\n\n" assertDiff(t, string(found), expected, "ini content") } func TestIniParse(t *testing.T) { file, err := ioutil.TempFile("", "") if err != nil { t.Fatalf("Cannot create temporary file: %s", err) } defer os.Remove(file.Name()) _, err = file.WriteString("value = 123") if err != nil { t.Fatalf("Cannot write to temporary file: %s", err) } file.Close() var opts struct { Value int 
`long:"value"` } err = IniParse(file.Name(), &opts) if err != nil { t.Fatalf("Could not parse ini: %s", err) } if opts.Value != 123 { t.Fatalf("Expected Value to be \"123\" but was \"%d\"", opts.Value) } } func TestIniCliOverrides(t *testing.T) { file, err := ioutil.TempFile("", "") if err != nil { t.Fatalf("Cannot create temporary file: %s", err) } defer os.Remove(file.Name()) _, err = file.WriteString("values = 123\n") _, err = file.WriteString("values = 456\n") if err != nil { t.Fatalf("Cannot write to temporary file: %s", err) } file.Close() var opts struct { Values []int `long:"values"` } p := NewParser(&opts, Default) err = NewIniParser(p).ParseFile(file.Name()) if err != nil { t.Fatalf("Could not parse ini: %s", err) } _, err = p.ParseArgs([]string{"--values", "111", "--values", "222"}) if err != nil { t.Fatalf("Failed to parse arguments: %s", err) } if len(opts.Values) != 2 { t.Fatalf("Expected Values to contain two elements, but got %d", len(opts.Values)) } if opts.Values[0] != 111 { t.Fatalf("Expected Values[0] to be 111, but got '%d'", opts.Values[0]) } if opts.Values[1] != 222 { t.Fatalf("Expected Values[1] to be 222, but got '%d'", opts.Values[1]) } } func TestIniOverrides(t *testing.T) { file, err := ioutil.TempFile("", "") if err != nil { t.Fatalf("Cannot create temporary file: %s", err) } defer os.Remove(file.Name()) _, err = file.WriteString("value-with-default = \"ini-value\"\n") _, err = file.WriteString("value-with-default-override-cli = \"ini-value\"\n") if err != nil { t.Fatalf("Cannot write to temporary file: %s", err) } file.Close() var opts struct { ValueWithDefault string `long:"value-with-default" default:"value"` ValueWithDefaultOverrideCli string `long:"value-with-default-override-cli" default:"value"` } p := NewParser(&opts, Default) err = NewIniParser(p).ParseFile(file.Name()) if err != nil { t.Fatalf("Could not parse ini: %s", err) } _, err = p.ParseArgs([]string{"--value-with-default-override-cli", "cli-value"}) if err != nil { t.Fatalf("Failed to parse arguments: %s", err) } assertString(t, opts.ValueWithDefault, "ini-value") assertString(t, opts.ValueWithDefaultOverrideCli, "cli-value") } func TestIniOverridesFromConfigFlag(t *testing.T) { file, err := ioutil.TempFile("", "") if err != nil { t.Fatalf("Cannot create temporary file: %s", err) } defer os.Remove(file.Name()) _, err = file.WriteString("value-with-default = \"ini-value\"\n") _, err = file.WriteString("value-with-default-override-cli = \"ini-value\"\n") if err != nil { t.Fatalf("Cannot write to temporary file: %s", err) } file.Close() var opts struct { Config func(filename string) `long:"config"` ValueWithDefault string `long:"value-with-default" default:"value"` ValueWithDefaultOverrideCli string `long:"value-with-default-override-cli" default:"value"` } p := NewParser(&opts, Default) opt := p.FindOptionByLongName("config") opt.Default = []string{file.Name()} opts.Config = func(filename string) { parser := NewIniParser(p) parser.ParseAsDefaults = true parser.ParseFile(filename) } _, err = p.ParseArgs([]string{"--value-with-default-override-cli", "cli-value"}) if err != nil { t.Fatalf("Failed to parse arguments: %s", err) } assertString(t, opts.ValueWithDefault, "ini-value") assertString(t, opts.ValueWithDefaultOverrideCli, "cli-value") } func TestIniRequired(t *testing.T) { var opts struct { Required string `short:"r" required:"yes" description:"required"` Config func(s string) error `long:"config" default:"no-ini-file" no-ini:"true"` } p := NewParser(&opts, Default) opts.Config = func(s 
string) error { inip := NewIniParser(p) inip.ParseAsDefaults = true return inip.Parse(strings.NewReader("Required = ini-value\n")) } _, err := p.ParseArgs([]string{"-r", "cli-value"}) if err != nil { t.Fatalf("Failed to parse arguments: %s", err) } assertString(t, opts.Required, "cli-value") } func TestIniRequiredSlice_ShouldNotNeedToBeSpecifiedOnCli(t *testing.T) { type options struct { Items []string `long:"item" required:"true"` } var opts options ini := ` [Application Options] item=abc` args := []string{} parser := NewParser(&opts, Default) inip := NewIniParser(parser) inip.Parse(strings.NewReader(ini)) _, err := parser.ParseArgs(args) if err != nil { t.Fatalf("Unexpected failure: %v", err) } assertString(t, opts.Items[0], "abc") } func TestWriteFile(t *testing.T) { file, err := ioutil.TempFile("", "") if err != nil { t.Fatalf("Cannot create temporary file: %s", err) } defer os.Remove(file.Name()) var opts struct { Value int `long:"value"` } opts.Value = 123 p := NewParser(&opts, Default) ini := NewIniParser(p) err = ini.WriteFile(file.Name(), IniIncludeDefaults) if err != nil { t.Fatalf("Could not write ini file: %s", err) } found, err := ioutil.ReadFile(file.Name()) if err != nil { t.Fatalf("Could not read written ini file: %s", err) } expected := "[Application Options]\nValue = 123\n\n" assertDiff(t, string(found), expected, "ini content") } func TestOverwriteRequiredOptions(t *testing.T) { var tests = []struct { args []string expected []string }{ { args: []string{"--value", "from CLI"}, expected: []string{ "from CLI", "from default", }, }, { args: []string{"--value", "from CLI", "--default", "from CLI"}, expected: []string{ "from CLI", "from CLI", }, }, { args: []string{"--config", "no file name"}, expected: []string{ "from INI", "from INI", }, }, { args: []string{"--value", "from CLI before", "--default", "from CLI before", "--config", "no file name"}, expected: []string{ "from INI", "from INI", }, }, { args: []string{"--value", "from CLI before", "--default", "from CLI before", "--config", "no file name", "--value", "from CLI after", "--default", "from CLI after"}, expected: []string{ "from CLI after", "from CLI after", }, }, } for _, test := range tests { var opts struct { Config func(s string) error `long:"config" no-ini:"true"` Value string `long:"value" required:"true"` Default string `long:"default" required:"true" default:"from default"` } p := NewParser(&opts, Default) opts.Config = func(s string) error { ini := NewIniParser(p) return ini.Parse(bytes.NewBufferString("value = from INI\ndefault = from INI")) } _, err := p.ParseArgs(test.args) if err != nil { t.Fatalf("Unexpected error %s with args %+v", err, test.args) } if opts.Value != test.expected[0] { t.Fatalf("Expected Value to be \"%s\" but was \"%s\" with args %+v", test.expected[0], opts.Value, test.args) } if opts.Default != test.expected[1] { t.Fatalf("Expected Default to be \"%s\" but was \"%s\" with args %+v", test.expected[1], opts.Default, test.args) } } } func TestIniOverwriteOptions(t *testing.T) { var tests = []struct { args []string expected string toggled bool }{ { args: []string{}, expected: "from default", }, { args: []string{"--value", "from CLI"}, expected: "from CLI", }, { args: []string{"--config", "no file name"}, expected: "from INI", toggled: true, }, { args: []string{"--value", "from CLI before", "--config", "no file name"}, expected: "from CLI before", toggled: true, }, { args: []string{"--config", "no file name", "--value", "from CLI after"}, expected: "from CLI after", toggled: true, }, { 
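// --config is never passed here, so no INI values are applied: Value keeps
// its compiled-in default and only the toggle changes.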
args: []string{"--toggle"}, toggled: true, expected: "from default", }, } for _, test := range tests { var opts struct { Config string `long:"config" no-ini:"true"` Value string `long:"value" default:"from default"` Toggle bool `long:"toggle"` } p := NewParser(&opts, Default) _, err := p.ParseArgs(test.args) if err != nil { t.Fatalf("Unexpected error %s with args %+v", err, test.args) } if opts.Config != "" { inip := NewIniParser(p) inip.ParseAsDefaults = true err = inip.Parse(bytes.NewBufferString("value = from INI\ntoggle = true")) if err != nil { t.Fatalf("Unexpected error %s with args %+v", err, test.args) } } if opts.Value != test.expected { t.Fatalf("Expected Value to be \"%s\" but was \"%s\" with args %+v", test.expected, opts.Value, test.args) } if opts.Toggle != test.toggled { t.Fatalf("Expected Toggle to be \"%v\" but was \"%v\" with args %+v", test.toggled, opts.Toggle, test.args) } } } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/README.md0000644000000000000000000001164615024302467023262 0ustar rootrootgo-flags: a go library for parsing command line arguments ========================================================= [![GoDoc](https://godoc.org/github.com/jessevdk/go-flags?status.png)](https://godoc.org/github.com/jessevdk/go-flags) This library provides similar functionality to the builtin flag library of go, but provides much more functionality and nicer formatting. From the documentation: Package flags provides an extensive command line option parser. The flags package is similar in functionality to the go builtin flag package but provides more options and uses reflection to provide a convenient and succinct way of specifying command line options. Supported features: * Options with short names (-v) * Options with long names (--verbose) * Options with and without arguments (bool v.s. other type) * Options with optional arguments and default values * Multiple option groups each containing a set of options * Generate and print well-formatted help message * Passing remaining command line arguments after -- (optional) * Ignoring unknown command line options (optional) * Supports -I/usr/include -I=/usr/include -I /usr/include option argument specification * Supports multiple short options -aux * Supports all primitive go types (string, int{8..64}, uint{8..64}, float) * Supports same option multiple times (can store in slice or last option counts) * Supports maps * Supports function callbacks * Supports namespaces for (nested) option groups The flags package uses structs, reflection and struct field tags to allow users to specify command line options. This results in very simple and concise specification of your application options. For example: ```go type Options struct { Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"` } ``` This specifies one option with a short name -v and a long name --verbose. When either -v or --verbose is found on the command line, a 'true' value will be appended to the Verbose field. e.g. when specifying -vvv, the resulting value of Verbose will be {[true, true, true]}. Example: -------- ```go var opts struct { // Slice of bool will append 'true' each time the option // is encountered (can be set multiple times, like -vvv) Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"` // Example of automatic marshalling to desired type (uint) Offset uint `long:"offset" description:"Offset"` // Example of a callback, called each time the option is found. 
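// The callback is invoked with the raw argument value of the option.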
Call func(string) `short:"c" description:"Call phone number"` // Example of a required flag Name string `short:"n" long:"name" description:"A name" required:"true"` // Example of a flag restricted to a pre-defined set of strings Animal string `long:"animal" choice:"cat" choice:"dog"` // Example of a value name File string `short:"f" long:"file" description:"A file" value-name:"FILE"` // Example of a pointer Ptr *int `short:"p" description:"A pointer to an integer"` // Example of a slice of strings StringSlice []string `short:"s" description:"A slice of strings"` // Example of a slice of pointers PtrSlice []*string `long:"ptrslice" description:"A slice of pointers to string"` // Example of a map IntMap map[string]int `long:"intmap" description:"A map from string to int"` // Example of env variable Thresholds []int `long:"thresholds" default:"1" default:"2" env:"THRESHOLD_VALUES" env-delim:","` } // Callback which will invoke callto: to call a number. // Note that this works just on OS X (and probably only with // Skype) but it shows the idea. opts.Call = func(num string) { cmd := exec.Command("open", "callto:"+num) cmd.Start() cmd.Process.Release() } // Make some fake arguments to parse. args := []string{ "-vv", "--offset=5", "-n", "Me", "--animal", "dog", // anything other than "cat" or "dog" will raise an error "-p", "3", "-s", "hello", "-s", "world", "--ptrslice", "hello", "--ptrslice", "world", "--intmap", "a:1", "--intmap", "b:5", "arg1", "arg2", "arg3", } // Parse flags from `args'. Note that here we use flags.ParseArgs for // the sake of making a working example. Normally, you would simply use // flags.Parse(&opts) which uses os.Args args, err := flags.ParseArgs(&opts, args) if err != nil { panic(err) } fmt.Printf("Verbosity: %v\n", opts.Verbose) fmt.Printf("Offset: %d\n", opts.Offset) fmt.Printf("Name: %s\n", opts.Name) fmt.Printf("Animal: %s\n", opts.Animal) fmt.Printf("Ptr: %d\n", *opts.Ptr) fmt.Printf("StringSlice: %v\n", opts.StringSlice) fmt.Printf("PtrSlice: [%v %v]\n", *opts.PtrSlice[0], *opts.PtrSlice[1]) fmt.Printf("IntMap: [a:%v b:%v]\n", opts.IntMap["a"], opts.IntMap["b"]) fmt.Printf("Remaining args: %s\n", strings.Join(args, " ")) // Output: Verbosity: [true true] // Offset: 5 // Name: Me // Ptr: 3 // StringSlice: [hello world] // PtrSlice: [hello world] // IntMap: [a:1 b:5] // Remaining args: arg1 arg2 arg3 ``` More information can be found in the godocs: dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/assert_test.go0000644000000000000000000000713515024302467024670 0ustar rootrootpackage flags import ( "fmt" "io" "io/ioutil" "os" "os/exec" "path" "runtime" "testing" ) func assertCallerInfo() (string, int) { ptr := make([]uintptr, 15) n := runtime.Callers(1, ptr) if n == 0 { return "", 0 } mef := runtime.FuncForPC(ptr[0]) mefile, meline := mef.FileLine(ptr[0]) for i := 2; i < n; i++ { f := runtime.FuncForPC(ptr[i]) file, line := f.FileLine(ptr[i]) if file != mefile { return file, line } } return mefile, meline } func assertErrorf(t *testing.T, format string, args ...interface{}) { msg := fmt.Sprintf(format, args...) file, line := assertCallerInfo() t.Errorf("%s:%d: %s", path.Base(file), line, msg) } func assertFatalf(t *testing.T, format string, args ...interface{}) { msg := fmt.Sprintf(format, args...) 
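// As in assertErrorf above, report the failure at the caller's file and
// line rather than inside this helper.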
file, line := assertCallerInfo() t.Fatalf("%s:%d: %s", path.Base(file), line, msg) } func assertString(t *testing.T, a string, b string) { if a != b { assertErrorf(t, "Expected %#v, but got %#v", b, a) } } func assertStringArray(t *testing.T, a []string, b []string) { if len(a) != len(b) { assertErrorf(t, "Expected %#v, but got %#v", b, a) return } for i, v := range a { if b[i] != v { assertErrorf(t, "Expected %#v, but got %#v", b, a) return } } } func assertBoolArray(t *testing.T, a []bool, b []bool) { if len(a) != len(b) { assertErrorf(t, "Expected %#v, but got %#v", b, a) return } for i, v := range a { if b[i] != v { assertErrorf(t, "Expected %#v, but got %#v", b, a) return } } } func assertParserSuccess(t *testing.T, data interface{}, args ...string) (*Parser, []string) { parser := NewParser(data, Default&^PrintErrors) ret, err := parser.ParseArgs(args) if err != nil { t.Fatalf("Unexpected parse error: %s", err) return nil, nil } return parser, ret } func assertParseSuccess(t *testing.T, data interface{}, args ...string) []string { _, ret := assertParserSuccess(t, data, args...) return ret } func assertError(t *testing.T, err error, typ ErrorType, msg string) { if err == nil { assertFatalf(t, "Expected error: \"%s\", but no error occurred", msg) return } if e, ok := err.(*Error); !ok { assertFatalf(t, "Expected Error type, but got %#v", err) } else { if e.Type != typ { assertErrorf(t, "Expected error type {%s}, but got {%s}", typ, e.Type) } if e.Message != msg { assertErrorf(t, "Expected error message %#v, but got %#v", msg, e.Message) } } } func assertParseFail(t *testing.T, typ ErrorType, msg string, data interface{}, args ...string) []string { parser := NewParser(data, Default&^PrintErrors) ret, err := parser.ParseArgs(args) assertError(t, err, typ, msg) return ret } func diff(a, b string) (string, error) { atmp, err := ioutil.TempFile("", "help-diff") if err != nil { return "", err } btmp, err := ioutil.TempFile("", "help-diff") if err != nil { return "", err } if _, err := io.WriteString(atmp, a); err != nil { return "", err } if _, err := io.WriteString(btmp, b); err != nil { return "", err } ret, err := exec.Command("diff", "-u", "-d", "--label", "got", atmp.Name(), "--label", "expected", btmp.Name()).Output() os.Remove(atmp.Name()) os.Remove(btmp.Name()) if err.Error() == "exit status 1" { return string(ret), nil } return string(ret), err } func assertDiff(t *testing.T, actual, expected, msg string) { if actual == expected { return } ret, err := diff(actual, expected) if err != nil { assertErrorf(t, "Unexpected diff error: %s", err) assertErrorf(t, "Unexpected %s, expected:\n\n%s\n\nbut got\n\n%s", msg, expected, actual) } else { assertErrorf(t, "Unexpected %s:\n\n%s", msg, ret) } } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/termsize.go0000644000000000000000000000045515024302467024170 0ustar rootroot//go:build !windows && !plan9 && !appengine && !wasm && !aix // +build !windows,!plan9,!appengine,!wasm,!aix package flags import ( "golang.org/x/sys/unix" ) func getTerminalColumns() int { ws, err := unix.IoctlGetWinsize(0, unix.TIOCGWINSZ) if err != nil { return 80 } return int(ws.Col) } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/marshal_test.go0000644000000000000000000000467215024302467025021 0ustar rootrootpackage flags import ( "fmt" "testing" ) type marshalled string func (m *marshalled) UnmarshalFlag(value string) error { if value == "yes" { *m = "true" } else if value == "no" { *m = "false" } else { return fmt.Errorf("`%s' is not a valid value, 
please specify `yes' or `no'", value) } return nil } func (m marshalled) MarshalFlag() (string, error) { if m == "true" { return "yes", nil } return "no", nil } type marshalledError bool func (m marshalledError) MarshalFlag() (string, error) { return "", newErrorf(ErrMarshal, "Failed to marshal") } func TestUnmarshal(t *testing.T) { var opts = struct { Value marshalled `short:"v"` }{} ret := assertParseSuccess(t, &opts, "-v=yes") assertStringArray(t, ret, []string{}) if opts.Value != "true" { t.Errorf("Expected Value to be \"true\"") } } func TestUnmarshalDefault(t *testing.T) { var opts = struct { Value marshalled `short:"v" default:"yes"` }{} ret := assertParseSuccess(t, &opts) assertStringArray(t, ret, []string{}) if opts.Value != "true" { t.Errorf("Expected Value to be \"true\"") } } func TestUnmarshalOptional(t *testing.T) { var opts = struct { Value marshalled `short:"v" optional:"yes" optional-value:"yes"` }{} ret := assertParseSuccess(t, &opts, "-v") assertStringArray(t, ret, []string{}) if opts.Value != "true" { t.Errorf("Expected Value to be \"true\"") } } func TestUnmarshalError(t *testing.T) { var opts = struct { Value marshalled `short:"v"` }{} assertParseFail(t, ErrMarshal, fmt.Sprintf("invalid argument for flag `%cv' (expected flags.marshalled): `invalid' is not a valid value, please specify `yes' or `no'", defaultShortOptDelimiter), &opts, "-vinvalid") } func TestUnmarshalPositionalError(t *testing.T) { var opts = struct { Args struct { Value marshalled } `positional-args:"yes"` }{} parser := NewParser(&opts, Default&^PrintErrors) _, err := parser.ParseArgs([]string{"invalid"}) msg := "`invalid' is not a valid value, please specify `yes' or `no'" if err == nil { assertFatalf(t, "Expected error: %s", msg) return } if err.Error() != msg { assertErrorf(t, "Expected error message %#v, but got %#v", msg, err.Error()) } } func TestMarshalError(t *testing.T) { var opts = struct { Value marshalledError `short:"v"` }{} p := NewParser(&opts, Default) o := p.Command.Groups()[0].Options()[0] _, err := convertToString(o.value, o.tag) assertError(t, err, ErrMarshal, "Failed to marshal") } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/flags.go0000644000000000000000000002773715024302467023436 0ustar rootroot// Copyright 2012 Jesse van den Kieboom. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. /* Package flags provides an extensive command line option parser. The flags package is similar in functionality to the go built-in flag package but provides more options and uses reflection to provide a convenient and succinct way of specifying command line options. # Supported features The following features are supported in go-flags: Options with short names (-v) Options with long names (--verbose) Options with and without arguments (bool v.s. 
other type) Options with optional arguments and default values Option default values from ENVIRONMENT_VARIABLES, including slice and map values Multiple option groups each containing a set of options Generate and print well-formatted help message Passing remaining command line arguments after -- (optional) Ignoring unknown command line options (optional) Supports -I/usr/include -I=/usr/include -I /usr/include option argument specification Supports multiple short options -aux Supports all primitive go types (string, int{8..64}, uint{8..64}, float) Supports same option multiple times (can store in slice or last option counts) Supports maps Supports function callbacks Supports namespaces for (nested) option groups Additional features specific to Windows: Options with short names (/v) Options with long names (/verbose) Windows-style options with arguments use a colon as the delimiter Modify generated help message with Windows-style / options Windows style options can be disabled at build time using the "forceposix" build tag # Basic usage The flags package uses structs, reflection and struct field tags to allow users to specify command line options. This results in very simple and concise specification of your application options. For example: type Options struct { Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"` } This specifies one option with a short name -v and a long name --verbose. When either -v or --verbose is found on the command line, a 'true' value will be appended to the Verbose field. e.g. when specifying -vvv, the resulting value of Verbose will be {[true, true, true]}. Slice options work exactly the same as primitive type options, except that whenever the option is encountered, a value is appended to the slice. Map options from string to primitive type are also supported. On the command line, you specify the value for such an option as key:value. For example type Options struct { AuthorInfo string[string] `short:"a"` } Then, the AuthorInfo map can be filled with something like -a name:Jesse -a "surname:van den Kieboom". Finally, for full control over the conversion between command line argument values and options, user defined types can choose to implement the Marshaler and Unmarshaler interfaces. # Available field tags The following is a list of tags for struct fields supported by go-flags: short: the short name of the option (single character) long: the long name of the option required: if non empty, makes the option required to appear on the command line. If a required option is not present, the parser will return ErrRequired (optional) description: the description of the option (optional) long-description: the long description of the option. Currently only displayed in generated man pages (optional) no-flag: if non-empty, this field is ignored as an option (optional) optional: if non-empty, makes the argument of the option optional. When an argument is optional it can only be specified using --option=argument (optional) optional-value: the value of an optional option when the option occurs without an argument. This tag can be specified multiple times in the case of maps or slices (optional) default: the default value of an option. This tag can be specified multiple times in the case of slices or maps (optional) default-mask: when specified, this value will be displayed in the help instead of the actual default value. This is useful mostly for hiding otherwise sensitive information from showing up in the help. 
If default-mask takes the special value "-", then no default value will be shown at all (optional) env: the default value of the option is overridden from the specified environment variable, if one has been defined. (optional) env-delim: the 'env' default value from environment is split into multiple values with the given delimiter string, use with slices and maps (optional) value-name: the name of the argument value (to be shown in the help) (optional) choice: limits the values for an option to a set of values. Repeat this tag once for each allowable value. e.g. `long:"animal" choice:"cat" choice:"dog"` hidden: if non-empty, the option is not visible in the help or man page. base: a base (radix) used to convert strings to integer values, the default base is 10 (i.e. decimal) (optional) ini-name: the explicit ini option name (optional) no-ini: if non-empty this field is ignored as an ini option (optional) group: when specified on a struct field, makes the struct field a separate group with the given name (optional) namespace: when specified on a group struct field, the namespace gets prepended to every option's long name and subgroup's namespace of this group, separated by the parser's namespace delimiter (optional) env-namespace: when specified on a group struct field, the env-namespace gets prepended to every option's env key and subgroup's env-namespace of this group, separated by the parser's env-namespace delimiter (optional) command: when specified on a struct field, makes the struct field a (sub)command with the given name (optional) subcommands-optional: when specified on a command struct field, makes any subcommands of that command optional (optional) alias: when specified on a command struct field, adds the specified name as an alias for the command. Can be be specified multiple times to add more than one alias (optional) positional-args: when specified on a field with a struct type, uses the fields of that struct to parse remaining positional command line arguments into (in order of the fields). If a field has a slice type, then all remaining arguments will be added to it. Positional arguments are optional by default, unless the "required" tag is specified together with the "positional-args" tag. The "required" tag can also be set on the individual rest argument fields, to require only the first N positional arguments. If the "required" tag is set on the rest arguments slice, then its value determines the minimum amount of rest arguments that needs to be provided (e.g. `required:"2"`) (optional) positional-arg-name: used on a field in a positional argument struct; name of the positional argument placeholder to be shown in the help (optional) Either the `short:` tag or the `long:` must be specified to make the field eligible as an option. # Option groups Option groups are a simple way to semantically separate your options. All options in a particular group are shown together in the help under the name of the group. Namespaces can be used to specify option long names more precisely and emphasize the options affiliation to their group. There are currently three ways to specify option groups. 1. Use NewNamedParser specifying the various option groups. 2. Use AddGroup to add a group to an existing parser. 3. Add a struct field to the top-level options annotated with the group:"group-name" tag. # Commands The flags package also has basic support for commands. Commands are often used in monolithic applications that support various commands or actions. 
Take git for example, all of the add, commit, checkout, etc. are called commands. Using commands you can easily separate multiple functions of your application. There are currently two ways to specify a command. 1. Use AddCommand on an existing parser. 2. Add a struct field to your options struct annotated with the command:"command-name" tag. The most common, idiomatic way to implement commands is to define a global parser instance and implement each command in a separate file. These command files should define a go init function which calls AddCommand on the global parser. When parsing ends and there is an active command and that command implements the Commander interface, then its Execute method will be run with the remaining command line arguments. Command structs can have options which become valid to parse after the command has been specified on the command line, in addition to the options of all the parent commands. I.e. considering a -v flag on the parser and an add command, the following are equivalent: ./app -v add ./app add -v However, if the -v flag is defined on the add command, then the first of the two examples above would fail since the -v flag is not defined before the add command. # Completion go-flags has builtin support to provide bash completion of flags, commands and argument values. To use completion, the binary which uses go-flags can be invoked in a special environment to list completion of the current command line argument. It should be noted that this `executes` your application, and it is up to the user to make sure there are no negative side effects (for example from init functions). Setting the environment variable `GO_FLAGS_COMPLETION=1` enables completion by replacing the argument parsing routine with the completion routine which outputs completions for the passed arguments. The basic invocation to complete a set of arguments is therefore: GO_FLAGS_COMPLETION=1 ./completion-example arg1 arg2 arg3 where `completion-example` is the binary, `arg1` and `arg2` are the current arguments, and `arg3` (the last argument) is the argument to be completed. If the GO_FLAGS_COMPLETION is set to "verbose", then descriptions of possible completion items will also be shown, if there are more than 1 completion items. To use this with bash completion, a simple file can be written which calls the binary which supports go-flags completion: _completion_example() { # All arguments except the first one args=("${COMP_WORDS[@]:1:$COMP_CWORD}") # Only split on newlines local IFS=$'\n' # Call completion (note that the first element of COMP_WORDS is # the executable itself) COMPREPLY=($(GO_FLAGS_COMPLETION=1 ${COMP_WORDS[0]} "${args[@]}")) return 0 } complete -F _completion_example completion-example Completion requires the parser option PassDoubleDash and is therefore enforced if the environment variable GO_FLAGS_COMPLETION is set. Customized completion for argument values is supported by implementing the flags.Completer interface for the argument value type. An example of a type which does so is the flags.Filename type, an alias of string allowing simple filename completion. A slice or array argument value whose element type implements flags.Completer will also be completed. 
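As an illustrative sketch (the Hostname type and its candidate list are
hypothetical; it assumes the Complete(match string) []Completion signature
and the Completion type's Item field declared by this package, plus the
standard strings package), a custom argument type might implement Completer
along these lines:

	type Hostname string

	func (h Hostname) Complete(match string) []Completion {
		candidates := []string{"alpha.example.com", "beta.example.com"}

		var ret []Completion

		for _, c := range candidates {
			if strings.HasPrefix(c, match) {
				ret = append(ret, Completion{Item: c})
			}
		}

		return ret
	}

Any option or positional argument declared with type Hostname would then
offer those candidates when completion is requested.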
*/ package flags dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/termsize_nosysioctl.go0000644000000000000000000000021515024302467026450 0ustar rootroot//go:build plan9 || appengine || wasm || aix // +build plan9 appengine wasm aix package flags func getTerminalColumns() int { return 80 } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/long_test.go0000644000000000000000000000337015024302467024323 0ustar rootrootpackage flags import ( "testing" ) func TestLong(t *testing.T) { var opts = struct { Value bool `long:"value"` }{} ret := assertParseSuccess(t, &opts, "--value") assertStringArray(t, ret, []string{}) if !opts.Value { t.Errorf("Expected Value to be true") } } func TestLongArg(t *testing.T) { var opts = struct { Value string `long:"value"` }{} ret := assertParseSuccess(t, &opts, "--value", "value") assertStringArray(t, ret, []string{}) assertString(t, opts.Value, "value") } func TestLongArgEqual(t *testing.T) { var opts = struct { Value string `long:"value"` }{} ret := assertParseSuccess(t, &opts, "--value=value") assertStringArray(t, ret, []string{}) assertString(t, opts.Value, "value") } func TestLongDefault(t *testing.T) { var opts = struct { Value string `long:"value" default:"value"` }{} ret := assertParseSuccess(t, &opts) assertStringArray(t, ret, []string{}) assertString(t, opts.Value, "value") } func TestLongOptional(t *testing.T) { var opts = struct { Value string `long:"value" optional:"yes" optional-value:"value"` }{} ret := assertParseSuccess(t, &opts, "--value") assertStringArray(t, ret, []string{}) assertString(t, opts.Value, "value") } func TestLongOptionalArg(t *testing.T) { var opts = struct { Value string `long:"value" optional:"yes" optional-value:"value"` }{} ret := assertParseSuccess(t, &opts, "--value", "no") assertStringArray(t, ret, []string{"no"}) assertString(t, opts.Value, "value") } func TestLongOptionalArgEqual(t *testing.T) { var opts = struct { Value string `long:"value" optional:"yes" optional-value:"value"` }{} ret := assertParseSuccess(t, &opts, "--value=value", "no") assertStringArray(t, ret, []string{"no"}) assertString(t, opts.Value, "value") } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/group.go0000644000000000000000000002420615024302467023462 0ustar rootroot// Copyright 2012 Jesse van den Kieboom. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package flags import ( "errors" "reflect" "strings" "unicode/utf8" ) // ErrNotPointerToStruct indicates that a provided data container is not // a pointer to a struct. Only pointers to structs are valid data containers // for options. var ErrNotPointerToStruct = errors.New("provided data is not a pointer to struct") // Group represents an option group. Option groups can be used to logically // group options together under a description. Groups are only used to provide // more structure to options both for the user (as displayed in the help message) // and for you, since groups can be nested. type Group struct { // A short description of the group. The // short description is primarily used in the built-in generated help // message ShortDescription string // A long description of the group. The long // description is primarily used to present information on commands // (Command embeds Group) in the built-in generated help and man pages. 
LongDescription string // The namespace of the group Namespace string // The environment namespace of the group EnvNamespace string // If true, the group is not displayed in the help or man page Hidden bool // The parent of the group or nil if it has no parent parent interface{} // All the options in the group options []*Option // All the subgroups groups []*Group // Whether the group represents the built-in help group isBuiltinHelp bool data interface{} } type scanHandler func(reflect.Value, *reflect.StructField) (bool, error) // AddGroup adds a new group to the command with the given name and data. The // data needs to be a pointer to a struct from which the fields indicate which // options are in the group. func (g *Group) AddGroup(shortDescription string, longDescription string, data interface{}) (*Group, error) { group := newGroup(shortDescription, longDescription, data) group.parent = g if err := group.scan(); err != nil { return nil, err } g.groups = append(g.groups, group) return group, nil } // AddOption adds a new option to this group. func (g *Group) AddOption(option *Option, data interface{}) { option.value = reflect.ValueOf(data) option.group = g g.options = append(g.options, option) } // Groups returns the list of groups embedded in this group. func (g *Group) Groups() []*Group { return g.groups } // Options returns the list of options in this group. func (g *Group) Options() []*Option { return g.options } // Find locates the subgroup with the given short description and returns it. // If no such group can be found Find will return nil. Note that the description // is matched case insensitively. func (g *Group) Find(shortDescription string) *Group { lshortDescription := strings.ToLower(shortDescription) var ret *Group g.eachGroup(func(gg *Group) { if gg != g && strings.ToLower(gg.ShortDescription) == lshortDescription { ret = gg } }) return ret } func (g *Group) findOption(matcher func(*Option) bool) (option *Option) { g.eachGroup(func(g *Group) { for _, opt := range g.options { if option == nil && matcher(opt) { option = opt } } }) return option } // FindOptionByLongName finds an option that is part of the group, or any of its // subgroups, by matching its long name (including the option namespace). func (g *Group) FindOptionByLongName(longName string) *Option { return g.findOption(func(option *Option) bool { return option.LongNameWithNamespace() == longName }) } // FindOptionByShortName finds an option that is part of the group, or any of // its subgroups, by matching its short name. 
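//
// Like FindOptionByLongName, the method is available on Parser through
// embedding, so a lookup typically reads (illustrative):
//
//	verbose := p.FindOptionByShortName('v')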
func (g *Group) FindOptionByShortName(shortName rune) *Option { return g.findOption(func(option *Option) bool { return option.ShortName == shortName }) } func newGroup(shortDescription string, longDescription string, data interface{}) *Group { return &Group{ ShortDescription: shortDescription, LongDescription: longDescription, data: data, } } func (g *Group) optionByName(name string, namematch func(*Option, string) bool) *Option { prio := 0 var retopt *Option g.eachGroup(func(g *Group) { for _, opt := range g.options { if namematch != nil && namematch(opt, name) && prio < 4 { retopt = opt prio = 4 } if name == opt.field.Name && prio < 3 { retopt = opt prio = 3 } if name == opt.LongNameWithNamespace() && prio < 2 { retopt = opt prio = 2 } if opt.ShortName != 0 && name == string(opt.ShortName) && prio < 1 { retopt = opt prio = 1 } } }) return retopt } func (g *Group) showInHelp() bool { if g.Hidden { return false } for _, opt := range g.options { if opt.showInHelp() { return true } } return false } func (g *Group) eachGroup(f func(*Group)) { f(g) for _, gg := range g.groups { gg.eachGroup(f) } } func isStringFalsy(s string) bool { return s == "" || s == "false" || s == "no" || s == "0" } func (g *Group) scanStruct(realval reflect.Value, sfield *reflect.StructField, handler scanHandler) error { stype := realval.Type() if sfield != nil { if ok, err := handler(realval, sfield); err != nil { return err } else if ok { return nil } } for i := 0; i < stype.NumField(); i++ { field := stype.Field(i) // PkgName is set only for non-exported fields, which we ignore if field.PkgPath != "" && !field.Anonymous { continue } mtag := newMultiTag(string(field.Tag)) if err := mtag.Parse(); err != nil { return err } // Skip fields with the no-flag tag if mtag.Get("no-flag") != "" { continue } // Dive deep into structs or pointers to structs kind := field.Type.Kind() fld := realval.Field(i) if kind == reflect.Struct { if err := g.scanStruct(fld, &field, handler); err != nil { return err } } else if kind == reflect.Ptr && field.Type.Elem().Kind() == reflect.Struct { flagCountBefore := len(g.options) + len(g.groups) if fld.IsNil() { fld = reflect.New(fld.Type().Elem()) } if err := g.scanStruct(reflect.Indirect(fld), &field, handler); err != nil { return err } if len(g.options)+len(g.groups) != flagCountBefore { realval.Field(i).Set(fld) } } longname := mtag.Get("long") shortname := mtag.Get("short") // Need at least either a short or long name if longname == "" && shortname == "" && mtag.Get("ini-name") == "" { continue } short := rune(0) rc := utf8.RuneCountInString(shortname) if rc > 1 { return newErrorf(ErrShortNameTooLong, "short names can only be 1 character long, not `%s'", shortname) } else if rc == 1 { short, _ = utf8.DecodeRuneInString(shortname) } description := mtag.Get("description") def := mtag.GetMany("default") optionalValue := mtag.GetMany("optional-value") valueName := mtag.Get("value-name") defaultMask := mtag.Get("default-mask") optional := !isStringFalsy(mtag.Get("optional")) required := !isStringFalsy(mtag.Get("required")) choices := mtag.GetMany("choice") hidden := !isStringFalsy(mtag.Get("hidden")) option := &Option{ Description: description, ShortName: short, LongName: longname, Default: def, EnvDefaultKey: mtag.Get("env"), EnvDefaultDelim: mtag.Get("env-delim"), OptionalArgument: optional, OptionalValue: optionalValue, Required: required, ValueName: valueName, DefaultMask: defaultMask, Choices: choices, Hidden: hidden, group: g, field: field, value: realval.Field(i), tag: mtag, } if 
option.isBool() && option.Default != nil { return newErrorf(ErrInvalidTag, "boolean flag `%s' may not have default values, they always default to `false' and can only be turned on", option.shortAndLongName()) } g.options = append(g.options, option) } return nil } func (g *Group) checkForDuplicateFlags() *Error { shortNames := make(map[rune]*Option) longNames := make(map[string]*Option) var duplicateError *Error g.eachGroup(func(g *Group) { for _, option := range g.options { if option.LongName != "" { longName := option.LongNameWithNamespace() if otherOption, ok := longNames[longName]; ok { duplicateError = newErrorf(ErrDuplicatedFlag, "option `%s' uses the same long name as option `%s'", option, otherOption) return } longNames[longName] = option } if option.ShortName != 0 { if otherOption, ok := shortNames[option.ShortName]; ok { duplicateError = newErrorf(ErrDuplicatedFlag, "option `%s' uses the same short name as option `%s'", option, otherOption) return } shortNames[option.ShortName] = option } } }) return duplicateError } func (g *Group) scanSubGroupHandler(realval reflect.Value, sfield *reflect.StructField) (bool, error) { mtag := newMultiTag(string(sfield.Tag)) if err := mtag.Parse(); err != nil { return true, err } subgroup := mtag.Get("group") if len(subgroup) != 0 { var ptrval reflect.Value if realval.Kind() == reflect.Ptr { ptrval = realval if ptrval.IsNil() { ptrval.Set(reflect.New(ptrval.Type())) } } else { ptrval = realval.Addr() } description := mtag.Get("description") group, err := g.AddGroup(subgroup, description, ptrval.Interface()) if err != nil { return true, err } group.Namespace = mtag.Get("namespace") group.EnvNamespace = mtag.Get("env-namespace") group.Hidden = mtag.Get("hidden") != "" return true, nil } return false, nil } func (g *Group) scanType(handler scanHandler) error { // Get all the public fields in the data struct ptrval := reflect.ValueOf(g.data) if ptrval.Type().Kind() != reflect.Ptr { panic(ErrNotPointerToStruct) } stype := ptrval.Type().Elem() if stype.Kind() != reflect.Struct { panic(ErrNotPointerToStruct) } realval := reflect.Indirect(ptrval) if err := g.scanStruct(realval, nil, handler); err != nil { return err } if err := g.checkForDuplicateFlags(); err != nil { return err } return nil } func (g *Group) scan() error { return g.scanType(g.scanSubGroupHandler) } func (g *Group) groupByName(name string) *Group { if len(name) == 0 { return g } return g.Find(name) } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/example_test.go0000644000000000000000000000603615024302467025021 0ustar rootroot// Example of use of the flags package. package flags import ( "fmt" "os/exec" ) func Example() { var opts struct { // Slice of bool will append 'true' each time the option // is encountered (can be set multiple times, like -vvv) Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"` // Example of automatic marshalling to desired type (uint) Offset uint `long:"offset" description:"Offset"` // Example of a callback, called each time the option is found. 
Call func(string) `short:"c" description:"Call phone number"` // Example of a required flag Name string `short:"n" long:"name" description:"A name" required:"true"` // Example of a value name File string `short:"f" long:"file" description:"A file" value-name:"FILE"` // Example of a pointer Ptr *int `short:"p" description:"A pointer to an integer"` // Example of a slice of strings StringSlice []string `short:"s" description:"A slice of strings"` // Example of a slice of pointers PtrSlice []*string `long:"ptrslice" description:"A slice of pointers to string"` // Example of a map IntMap map[string]int `long:"intmap" description:"A map from string to int"` // Example of a filename (useful for completion) Filename Filename `long:"filename" description:"A filename"` // Example of positional arguments Args struct { ID string Num int Rest []string } `positional-args:"yes" required:"yes"` } // Callback which will invoke callto: to call a number. // Note that this works just on OS X (and probably only with // Skype) but it shows the idea. opts.Call = func(num string) { cmd := exec.Command("open", "callto:"+num) cmd.Start() cmd.Process.Release() } // Make some fake arguments to parse. args := []string{ "-vv", "--offset=5", "-n", "Me", "-p", "3", "-s", "hello", "-s", "world", "--ptrslice", "hello", "--ptrslice", "world", "--intmap", "a:1", "--intmap", "b:5", "--filename", "hello.go", "id", "10", "remaining1", "remaining2", } // Parse flags from `args'. Note that here we use flags.ParseArgs for // the sake of making a working example. Normally, you would simply use // flags.Parse(&opts) which uses os.Args _, err := ParseArgs(&opts, args) if err != nil { panic(err) } fmt.Printf("Verbosity: %v\n", opts.Verbose) fmt.Printf("Offset: %d\n", opts.Offset) fmt.Printf("Name: %s\n", opts.Name) fmt.Printf("Ptr: %d\n", *opts.Ptr) fmt.Printf("StringSlice: %v\n", opts.StringSlice) fmt.Printf("PtrSlice: [%v %v]\n", *opts.PtrSlice[0], *opts.PtrSlice[1]) fmt.Printf("IntMap: [a:%v b:%v]\n", opts.IntMap["a"], opts.IntMap["b"]) fmt.Printf("Filename: %v\n", opts.Filename) fmt.Printf("Args.ID: %s\n", opts.Args.ID) fmt.Printf("Args.Num: %d\n", opts.Args.Num) fmt.Printf("Args.Rest: %v\n", opts.Args.Rest) // Output: Verbosity: [true true] // Offset: 5 // Name: Me // Ptr: 3 // StringSlice: [hello world] // PtrSlice: [hello world] // IntMap: [a:1 b:5] // Filename: hello.go // Args.ID: id // Args.Num: 10 // Args.Rest: [remaining1 remaining2] } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/ini.go0000644000000000000000000003227515024302467023112 0ustar rootrootpackage flags import ( "bufio" "fmt" "io" "os" "reflect" "sort" "strconv" "strings" ) // IniError contains location information on where an error occurred. type IniError struct { // The error message. Message string // The filename of the file in which the error occurred. File string // The line number at which the error occurred. LineNumber uint } // Error provides a "file:line: message" formatted message of the ini error. func (x *IniError) Error() string { return fmt.Sprintf( "%s:%d: %s", x.File, x.LineNumber, x.Message, ) } // IniOptions for writing type IniOptions uint const ( // IniNone indicates no options. IniNone IniOptions = 0 // IniIncludeDefaults indicates that default values should be written. IniIncludeDefaults = 1 << iota // IniCommentDefaults indicates that if IniIncludeDefaults is used // options with default values are written but commented out. 
IniCommentDefaults // IniIncludeComments indicates that comments containing the description // of an option should be written. IniIncludeComments // IniDefault provides a default set of options. IniDefault = IniIncludeComments ) // IniParser is a utility to read and write flags options from and to ini // formatted strings. type IniParser struct { ParseAsDefaults bool // override default flags parser *Parser } type iniValue struct { Name string Value string Quoted bool LineNumber uint } type iniSection []iniValue type ini struct { File string Sections map[string]iniSection } // NewIniParser creates a new ini parser for a given Parser. func NewIniParser(p *Parser) *IniParser { return &IniParser{ parser: p, } } // IniParse is a convenience function to parse command line options with default // settings from an ini formatted file. The provided data is a pointer to a struct // representing the default option group (named "Application Options"). For // more control, use flags.NewParser. func IniParse(filename string, data interface{}) error { p := NewParser(data, Default) return NewIniParser(p).ParseFile(filename) } // ParseFile parses flags from an ini formatted file. See Parse for more // information on the ini file format. The returned errors can be of the type // flags.Error or flags.IniError. func (i *IniParser) ParseFile(filename string) error { ini, err := readIniFromFile(filename) if err != nil { return err } return i.parse(ini) } // Parse parses flags from an ini format. You can use ParseFile as a // convenience function to parse from a filename instead of a general // io.Reader. // // The format of the ini file is as follows: // // [Option group name] // option = value // // Each section in the ini file represents an option group or command in the // flags parser. The default flags parser option group (i.e. when using // flags.Parse) is named 'Application Options'. The ini option name is matched // in the following order: // // 1. Compared to the ini-name tag on the option struct field (if present) // 2. Compared to the struct field name // 3. Compared to the option long name (if present) // 4. Compared to the option short name (if present) // // Sections for nested groups and commands can be addressed using a dot `.' // namespacing notation (i.e [subcommand.Options]). Group section names are // matched case insensitive. // // The returned errors can be of the type flags.Error or flags.IniError. func (i *IniParser) Parse(reader io.Reader) error { ini, err := readIni(reader, "") if err != nil { return err } return i.parse(ini) } // WriteFile writes the flags as ini format into a file. See Write // for more information. The returned error occurs when the specified file // could not be opened for writing. func (i *IniParser) WriteFile(filename string, options IniOptions) error { return writeIniToFile(i, filename, options) } // Write writes the current values of all the flags to an ini format. // See Parse for more information on the ini file format. You typically // call this only after settings have been parsed since the default values of each // option are stored just before parsing the flags (this is only relevant when // IniIncludeDefaults is _not_ set in options). func (i *IniParser) Write(writer io.Writer, options IniOptions) { writeIni(i, writer, options) } func readFullLine(reader *bufio.Reader) (string, error) { var line []byte for { l, more, err := reader.ReadLine() if err != nil { return "", err } if line == nil && !more { return string(l), nil } line = append(line, l...) 
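// Stop accumulating once ReadLine reports the final fragment of the line (more == false).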
if !more { break } } return string(line), nil } func optionIniName(option *Option) string { name := option.tag.Get("_read-ini-name") if len(name) != 0 { return name } name = option.tag.Get("ini-name") if len(name) != 0 { return name } return option.field.Name } func writeGroupIni(cmd *Command, group *Group, namespace string, writer io.Writer, options IniOptions) { var sname string if len(namespace) != 0 { sname = namespace } if cmd.Group != group && len(group.ShortDescription) != 0 { if len(sname) != 0 { sname += "." } sname += group.ShortDescription } sectionwritten := false comments := (options & IniIncludeComments) != IniNone for _, option := range group.options { if option.isFunc() || option.Hidden { continue } if len(option.tag.Get("no-ini")) != 0 { continue } val := option.value if (options&IniIncludeDefaults) == IniNone && option.valueIsDefault() { continue } if !sectionwritten { fmt.Fprintf(writer, "[%s]\n", sname) sectionwritten = true } if comments && len(option.Description) != 0 { fmt.Fprintf(writer, "; %s\n", option.Description) } oname := optionIniName(option) commentOption := (options&(IniIncludeDefaults|IniCommentDefaults)) == IniIncludeDefaults|IniCommentDefaults && option.valueIsDefault() kind := val.Type().Kind() switch kind { case reflect.Slice: kind = val.Type().Elem().Kind() if val.Len() == 0 { writeOption(writer, oname, kind, "", "", true, option.iniQuote) } else { for idx := 0; idx < val.Len(); idx++ { v, _ := convertToString(val.Index(idx), option.tag) writeOption(writer, oname, kind, "", v, commentOption, option.iniQuote) } } case reflect.Map: kind = val.Type().Elem().Kind() if val.Len() == 0 { writeOption(writer, oname, kind, "", "", true, option.iniQuote) } else { mkeys := val.MapKeys() keys := make([]string, len(val.MapKeys())) kkmap := make(map[string]reflect.Value) for i, k := range mkeys { keys[i], _ = convertToString(k, option.tag) kkmap[keys[i]] = k } sort.Strings(keys) for _, k := range keys { v, _ := convertToString(val.MapIndex(kkmap[k]), option.tag) writeOption(writer, oname, kind, k, v, commentOption, option.iniQuote) } } default: v, _ := convertToString(val, option.tag) writeOption(writer, oname, kind, "", v, commentOption, option.iniQuote) } if comments { fmt.Fprintln(writer) } } if sectionwritten && !comments { fmt.Fprintln(writer) } } func writeOption(writer io.Writer, optionName string, optionType reflect.Kind, optionKey string, optionValue string, commentOption bool, forceQuote bool) { if forceQuote || (optionType == reflect.String && !isPrint(optionValue)) { optionValue = strconv.Quote(optionValue) } comment := "" if commentOption { comment = "; " } fmt.Fprintf(writer, "%s%s =", comment, optionName) if optionKey != "" { fmt.Fprintf(writer, " %s:%s", optionKey, optionValue) } else if optionValue != "" { fmt.Fprintf(writer, " %s", optionValue) } fmt.Fprintln(writer) } func writeCommandIni(command *Command, namespace string, writer io.Writer, options IniOptions) { command.eachGroup(func(group *Group) { if !group.Hidden { writeGroupIni(command, group, namespace, writer, options) } }) for _, c := range command.commands { var fqn string if c.Hidden { continue } if len(namespace) != 0 { fqn = namespace + "." 
+ c.Name } else { fqn = c.Name } writeCommandIni(c, fqn, writer, options) } } func writeIni(parser *IniParser, writer io.Writer, options IniOptions) { writeCommandIni(parser.parser.Command, "", writer, options) } func writeIniToFile(parser *IniParser, filename string, options IniOptions) error { file, err := os.Create(filename) if err != nil { return err } defer file.Close() writeIni(parser, file, options) return nil } func readIniFromFile(filename string) (*ini, error) { file, err := os.Open(filename) if err != nil { return nil, err } defer file.Close() return readIni(file, filename) } func readIni(contents io.Reader, filename string) (*ini, error) { ret := &ini{ File: filename, Sections: make(map[string]iniSection), } reader := bufio.NewReader(contents) // Empty global section section := make(iniSection, 0, 10) sectionname := "" ret.Sections[sectionname] = section var lineno uint for { line, err := readFullLine(reader) if err == io.EOF { break } else if err != nil { return nil, err } lineno++ line = strings.TrimSpace(line) // Skip empty lines and lines starting with ; (comments) if len(line) == 0 || line[0] == ';' || line[0] == '#' { continue } if line[0] == '[' { if line[0] != '[' || line[len(line)-1] != ']' { return nil, &IniError{ Message: "malformed section header", File: filename, LineNumber: lineno, } } name := strings.TrimSpace(line[1 : len(line)-1]) if len(name) == 0 { return nil, &IniError{ Message: "empty section name", File: filename, LineNumber: lineno, } } sectionname = name section = ret.Sections[name] if section == nil { section = make(iniSection, 0, 10) ret.Sections[name] = section } continue } // Parse option here keyval := strings.SplitN(line, "=", 2) if len(keyval) != 2 { return nil, &IniError{ Message: fmt.Sprintf("malformed key=value (%s)", line), File: filename, LineNumber: lineno, } } name := strings.TrimSpace(keyval[0]) value := strings.TrimSpace(keyval[1]) quoted := false if len(value) != 0 && value[0] == '"' { if v, err := strconv.Unquote(value); err == nil { value = v quoted = true } else { return nil, &IniError{ Message: err.Error(), File: filename, LineNumber: lineno, } } } section = append(section, iniValue{ Name: name, Value: value, Quoted: quoted, LineNumber: lineno, }) ret.Sections[sectionname] = section } return ret, nil } func (i *IniParser) matchingGroups(name string) []*Group { if len(name) == 0 { var ret []*Group i.parser.eachGroup(func(g *Group) { ret = append(ret, g) }) return ret } g := i.parser.groupByName(name) if g != nil { return []*Group{g} } return nil } func (i *IniParser) parse(ini *ini) error { p := i.parser p.eachOption(func(cmd *Command, group *Group, option *Option) { option.clearReferenceBeforeSet = true }) var quotesLookup = make(map[*Option]bool) for name, section := range ini.Sections { groups := i.matchingGroups(name) if len(groups) == 0 { if (p.Options & IgnoreUnknown) == None { return newErrorf(ErrUnknownGroup, "could not find option group `%s'", name) } continue } for _, inival := range section { var opt *Option for _, group := range groups { opt = group.optionByName(inival.Name, func(o *Option, n string) bool { return strings.ToLower(o.tag.Get("ini-name")) == strings.ToLower(n) }) if opt != nil && len(opt.tag.Get("no-ini")) != 0 { opt = nil } if opt != nil { break } } if opt == nil { if (p.Options & IgnoreUnknown) == None { return &IniError{ Message: fmt.Sprintf("unknown option: %s", inival.Name), File: ini.File, LineNumber: inival.LineNumber, } } continue } // ini value is ignored if parsed as default but defaults are 
prevented if i.ParseAsDefaults && opt.preventDefault { continue } pval := &inival.Value if !opt.canArgument() && len(inival.Value) == 0 { pval = nil } else { if opt.value.Type().Kind() == reflect.Map { parts := strings.SplitN(inival.Value, ":", 2) // only handle unquoting if len(parts) == 2 && parts[1][0] == '"' { if v, err := strconv.Unquote(parts[1]); err == nil { parts[1] = v inival.Quoted = true } else { return &IniError{ Message: err.Error(), File: ini.File, LineNumber: inival.LineNumber, } } s := parts[0] + ":" + parts[1] pval = &s } } } var err error if i.ParseAsDefaults { err = opt.setDefault(pval) } else { err = opt.Set(pval) } if err != nil { return &IniError{ Message: err.Error(), File: ini.File, LineNumber: inival.LineNumber, } } // Defaults from ini files take precendence over defaults from parser opt.preventDefault = true // either all INI values are quoted or only values who need quoting if _, ok := quotesLookup[opt]; !inival.Quoted || !ok { quotesLookup[opt] = inival.Quoted } opt.tag.Set("_read-ini-name", inival.Name) } } for opt, quoted := range quotesLookup { opt.iniQuote = quoted } return nil } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/group_test.go0000644000000000000000000001307315024302467024521 0ustar rootrootpackage flags import ( "testing" ) func TestGroupInline(t *testing.T) { var opts = struct { Value bool `short:"v"` Group struct { G bool `short:"g"` } `group:"Grouped Options"` }{} p, ret := assertParserSuccess(t, &opts, "-v", "-g") assertStringArray(t, ret, []string{}) if !opts.Value { t.Errorf("Expected Value to be true") } if !opts.Group.G { t.Errorf("Expected Group.G to be true") } if p.Command.Group.Find("Grouped Options") == nil { t.Errorf("Expected to find group `Grouped Options'") } } func TestGroupAdd(t *testing.T) { var opts = struct { Value bool `short:"v"` }{} var grp = struct { G bool `short:"g"` }{} p := NewParser(&opts, Default) g, err := p.AddGroup("Grouped Options", "", &grp) if err != nil { t.Fatalf("Unexpected error: %v", err) return } ret, err := p.ParseArgs([]string{"-v", "-g", "rest"}) if err != nil { t.Fatalf("Unexpected error: %v", err) return } assertStringArray(t, ret, []string{"rest"}) if !opts.Value { t.Errorf("Expected Value to be true") } if !grp.G { t.Errorf("Expected Group.G to be true") } if p.Command.Group.Find("Grouped Options") != g { t.Errorf("Expected to find group `Grouped Options'") } if p.Groups()[1] != g { t.Errorf("Expected group %#v, but got %#v", g, p.Groups()[0]) } if g.Options()[0].ShortName != 'g' { t.Errorf("Expected short name `g' but got %v", g.Options()[0].ShortName) } } func TestGroupNestedInline(t *testing.T) { var opts = struct { Value bool `short:"v"` Group struct { G bool `short:"g"` Nested struct { N string `long:"n"` } `group:"Nested Options"` } `group:"Grouped Options"` }{} p, ret := assertParserSuccess(t, &opts, "-v", "-g", "--n", "n", "rest") assertStringArray(t, ret, []string{"rest"}) if !opts.Value { t.Errorf("Expected Value to be true") } if !opts.Group.G { t.Errorf("Expected Group.G to be true") } assertString(t, opts.Group.Nested.N, "n") if p.Command.Group.Find("Grouped Options") == nil { t.Errorf("Expected to find group `Grouped Options'") } if p.Command.Group.Find("Nested Options") == nil { t.Errorf("Expected to find group `Nested Options'") } } func TestGroupNestedInlineNamespace(t *testing.T) { var opts = struct { Opt string `long:"opt"` Group struct { Opt string `long:"opt"` Group struct { Opt string `long:"opt"` } `group:"Subsubgroup" namespace:"sap"` } `group:"Subgroup" 
namespace:"sip"` }{} p, ret := assertParserSuccess(t, &opts, "--opt", "a", "--sip.opt", "b", "--sip.sap.opt", "c", "rest") assertStringArray(t, ret, []string{"rest"}) assertString(t, opts.Opt, "a") assertString(t, opts.Group.Opt, "b") assertString(t, opts.Group.Group.Opt, "c") for _, name := range []string{"Subgroup", "Subsubgroup"} { if p.Command.Group.Find(name) == nil { t.Errorf("Expected to find group '%s'", name) } } } func TestDuplicateShortFlags(t *testing.T) { var opts struct { Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"` Variables []string `short:"v" long:"variable" description:"Set a variable value."` } args := []string{ "--verbose", "-v", "123", "-v", "456", } _, err := ParseArgs(&opts, args) if err == nil { t.Errorf("Expected an error with type ErrDuplicatedFlag") } else { err2 := err.(*Error) if err2.Type != ErrDuplicatedFlag { t.Errorf("Expected an error with type ErrDuplicatedFlag") } } } func TestDuplicateLongFlags(t *testing.T) { var opts struct { Test1 []bool `short:"a" long:"testing" description:"Test 1"` Test2 []string `short:"b" long:"testing" description:"Test 2."` } args := []string{ "--testing", } _, err := ParseArgs(&opts, args) if err == nil { t.Errorf("Expected an error with type ErrDuplicatedFlag") } else { err2 := err.(*Error) if err2.Type != ErrDuplicatedFlag { t.Errorf("Expected an error with type ErrDuplicatedFlag") } } } func TestFindOptionByLongFlag(t *testing.T) { var opts struct { Testing bool `long:"testing" description:"Testing"` } p := NewParser(&opts, Default) opt := p.FindOptionByLongName("testing") if opt == nil { t.Errorf("Expected option, but found none") } assertString(t, opt.LongName, "testing") } func TestFindOptionByShortFlag(t *testing.T) { var opts struct { Testing bool `short:"t" description:"Testing"` } p := NewParser(&opts, Default) opt := p.FindOptionByShortName('t') if opt == nil { t.Errorf("Expected option, but found none") } if opt.ShortName != 't' { t.Errorf("Expected 't', but got %v", opt.ShortName) } } func TestFindOptionByLongFlagInSubGroup(t *testing.T) { var opts struct { Group struct { Testing bool `long:"testing" description:"Testing"` } `group:"sub-group"` } p := NewParser(&opts, Default) opt := p.FindOptionByLongName("testing") if opt == nil { t.Errorf("Expected option, but found none") } assertString(t, opt.LongName, "testing") } func TestFindOptionByShortFlagInSubGroup(t *testing.T) { var opts struct { Group struct { Testing bool `short:"t" description:"Testing"` } `group:"sub-group"` } p := NewParser(&opts, Default) opt := p.FindOptionByShortName('t') if opt == nil { t.Errorf("Expected option, but found none") } if opt.ShortName != 't' { t.Errorf("Expected 't', but got %v", opt.ShortName) } } func TestAddOptionNonOptional(t *testing.T) { var opts struct { Test bool } p := NewParser(&opts, Default) p.AddOption(&Option{ LongName: "test", }, &opts.Test) _, err := p.ParseArgs([]string{"--test"}) if err != nil { t.Errorf("unexpected error: %s", err) } else if !opts.Test { t.Errorf("option not set") } } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/optstyle_other.go0000644000000000000000000000321015024302467025402 0ustar rootroot//go:build !windows || forceposix // +build !windows forceposix package flags import ( "strings" ) const ( defaultShortOptDelimiter = '-' defaultLongOptDelimiter = "--" defaultNameArgDelimiter = '=' ) func argumentStartsOption(arg string) bool { return len(arg) > 0 && arg[0] == '-' } func argumentIsOption(arg string) bool { if len(arg) > 1 && arg[0] 
== '-' && arg[1] != '-' { return true } if len(arg) > 2 && arg[0] == '-' && arg[1] == '-' && arg[2] != '-' { return true } return false } // stripOptionPrefix returns the option without the prefix and whether or // not the option is a long option or not. func stripOptionPrefix(optname string) (prefix string, name string, islong bool) { if strings.HasPrefix(optname, "--") { return "--", optname[2:], true } else if strings.HasPrefix(optname, "-") { return "-", optname[1:], false } return "", optname, false } // splitOption attempts to split the passed option into a name and an argument. // When there is no argument specified, nil will be returned for it. func splitOption(prefix string, option string, islong bool) (string, string, *string) { pos := strings.Index(option, "=") if (islong && pos >= 0) || (!islong && pos == 1) { rest := option[pos+1:] return option[:pos], "=", &rest } return option, "", nil } // addHelpGroup adds a new group that contains default help parameters. func (c *Command) addHelpGroup(showHelp func() error) *Group { var help struct { ShowHelp func() error `short:"h" long:"help" description:"Show this help message"` } help.ShowHelp = showHelp ret, _ := c.AddGroup("Help Options", "", &help) ret.isBuiltinHelp = true return ret } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/arg.go0000644000000000000000000000104415024302467023072 0ustar rootrootpackage flags import ( "reflect" ) // Arg represents a positional argument on the command line. type Arg struct { // The name of the positional argument (used in the help) Name string // A description of the positional argument (used in the help) Description string // The minimal number of required positional arguments Required int // The maximum number of required positional arguments RequiredMaximum int value reflect.Value tag multiTag } func (a *Arg) isRemaining() bool { return a.value.Type().Kind() == reflect.Slice } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/arg_test.go0000644000000000000000000000732515024302467024141 0ustar rootrootpackage flags import ( "testing" ) func TestPositional(t *testing.T) { var opts = struct { Value bool `short:"v"` Positional struct { Command int Filename string Rest []string } `positional-args:"yes" required:"yes"` }{} p := NewParser(&opts, Default) ret, err := p.ParseArgs([]string{"10", "arg_test.go", "a", "b"}) if err != nil { t.Fatalf("Unexpected error: %v", err) return } if opts.Positional.Command != 10 { t.Fatalf("Expected opts.Positional.Command to be 10, but got %v", opts.Positional.Command) } if opts.Positional.Filename != "arg_test.go" { t.Fatalf("Expected opts.Positional.Filename to be \"arg_test.go\", but got %v", opts.Positional.Filename) } assertStringArray(t, opts.Positional.Rest, []string{"a", "b"}) assertStringArray(t, ret, []string{}) } func TestPositionalRequired(t *testing.T) { var opts = struct { Value bool `short:"v"` Positional struct { Command int Filename string Rest []string } `positional-args:"yes" required:"yes"` }{} p := NewParser(&opts, None) _, err := p.ParseArgs([]string{"10"}) assertError(t, err, ErrRequired, "the required argument `Filename` was not provided") } func TestPositionalRequiredRest1Fail(t *testing.T) { var opts = struct { Value bool `short:"v"` Positional struct { Rest []string `required:"yes"` } `positional-args:"yes"` }{} p := NewParser(&opts, None) _, err := p.ParseArgs([]string{}) assertError(t, err, ErrRequired, "the required argument `Rest (at least 1 argument)` was not provided") } func TestPositionalRequiredRest1Pass(t 
*testing.T) { var opts = struct { Value bool `short:"v"` Positional struct { Rest []string `required:"yes"` } `positional-args:"yes"` }{} p := NewParser(&opts, None) _, err := p.ParseArgs([]string{"rest1"}) if err != nil { t.Fatalf("Unexpected error: %v", err) return } if len(opts.Positional.Rest) != 1 { t.Fatalf("Expected 1 positional rest argument") } assertString(t, opts.Positional.Rest[0], "rest1") } func TestPositionalRequiredRest2Fail(t *testing.T) { var opts = struct { Value bool `short:"v"` Positional struct { Rest []string `required:"2"` } `positional-args:"yes"` }{} p := NewParser(&opts, None) _, err := p.ParseArgs([]string{"rest1"}) assertError(t, err, ErrRequired, "the required argument `Rest (at least 2 arguments, but got only 1)` was not provided") } func TestPositionalRequiredRest2Pass(t *testing.T) { var opts = struct { Value bool `short:"v"` Positional struct { Rest []string `required:"2"` } `positional-args:"yes"` }{} p := NewParser(&opts, None) _, err := p.ParseArgs([]string{"rest1", "rest2", "rest3"}) if err != nil { t.Fatalf("Unexpected error: %v", err) return } if len(opts.Positional.Rest) != 3 { t.Fatalf("Expected 3 positional rest argument") } assertString(t, opts.Positional.Rest[0], "rest1") assertString(t, opts.Positional.Rest[1], "rest2") assertString(t, opts.Positional.Rest[2], "rest3") } func TestPositionalRequiredRestRangeFail(t *testing.T) { var opts = struct { Value bool `short:"v"` Positional struct { Rest []string `required:"1-2"` } `positional-args:"yes"` }{} p := NewParser(&opts, None) _, err := p.ParseArgs([]string{"rest1", "rest2", "rest3"}) assertError(t, err, ErrRequired, "the required argument `Rest (at most 2 arguments, but got 3)` was not provided") } func TestPositionalRequiredRestRangeEmptyFail(t *testing.T) { var opts = struct { Value bool `short:"v"` Positional struct { Rest []string `required:"0-0"` } `positional-args:"yes"` }{} p := NewParser(&opts, None) _, err := p.ParseArgs([]string{"some", "thing"}) assertError(t, err, ErrRequired, "the required argument `Rest (zero arguments)` was not provided") } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/go.sum0000644000000000000000000000023115024302467023122 0ustar rootrootgolang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/completion_test.go0000644000000000000000000001716215024302467025541 0ustar rootrootpackage flags import ( "bytes" "io" "os" "path" "path/filepath" "reflect" "runtime" "strings" "testing" ) type TestComplete struct { } func (t *TestComplete) Complete(match string) []Completion { options := []string{ "hello world", "hello universe", "hello multiverse", } ret := make([]Completion, 0, len(options)) for _, o := range options { if strings.HasPrefix(o, match) { ret = append(ret, Completion{ Item: o, }) } } return ret } var completionTestOptions struct { Verbose bool `short:"v" long:"verbose" description:"Verbose messages"` Debug bool `short:"d" long:"debug" description:"Enable debug"` Info bool `short:"i" description:"Display info"` Version bool `long:"version" description:"Show version"` Required bool `long:"required" required:"true" description:"This is required"` Hidden bool `long:"hidden" hidden:"true" description:"This is hidden"` AddCommand struct { Positional struct { Filename Filename } `positional-args:"yes"` } `command:"add" description:"add an item"` AddMultiCommand struct { Positional struct { Filename 
[]Filename } `positional-args:"yes"` Extra []Filename `short:"f"` } `command:"add-multi" description:"add multiple items"` AddMultiCommandFlag struct { Files []Filename `short:"f"` } `command:"add-multi-flag" description:"add multiple items via flags"` RemoveCommand struct { Other bool `short:"o"` File Filename `short:"f" long:"filename"` } `command:"rm" description:"remove an item"` RenameCommand struct { Completed TestComplete `short:"c" long:"completed"` } `command:"rename" description:"rename an item"` HiddenCommand struct { } `command:"hidden" description:"hidden command" hidden:"true"` } type completionTest struct { Args []string Completed []string ShowDescriptions bool } var completionTests []completionTest func init() { _, sourcefile, _, _ := runtime.Caller(0) completionTestSourcedir := filepath.Join(filepath.SplitList(path.Dir(sourcefile))...) completionTestFilename := []string{filepath.Join(completionTestSourcedir, "completion.go"), filepath.Join(completionTestSourcedir, "completion_test.go")} completionTestSubdir := []string{ filepath.Join(completionTestSourcedir, "examples/add.go"), filepath.Join(completionTestSourcedir, "examples/bash-completion"), filepath.Join(completionTestSourcedir, "examples/main.go"), filepath.Join(completionTestSourcedir, "examples/rm.go"), } completionTests = []completionTest{ { // Short names []string{"-"}, []string{"--debug", "--required", "--verbose", "--version", "-i"}, false, }, { // Short names full []string{"-i"}, []string{"-i"}, false, }, { // Short names concatenated []string{"-dv"}, []string{"-dv"}, false, }, { // Long names []string{"--"}, []string{"--debug", "--required", "--verbose", "--version"}, false, }, { // Long names with descriptions []string{"--"}, []string{ "--debug # Enable debug", "--required # This is required", "--verbose # Verbose messages", "--version # Show version", }, true, }, { // Long names partial []string{"--ver"}, []string{"--verbose", "--version"}, false, }, { // Commands []string{""}, []string{"add", "add-multi", "add-multi-flag", "rename", "rm"}, false, }, { // Commands with descriptions []string{""}, []string{ "add # add an item", "add-multi # add multiple items", "add-multi-flag # add multiple items via flags", "rename # rename an item", "rm # remove an item", }, true, }, { // Commands partial []string{"r"}, []string{"rename", "rm"}, false, }, { // Positional filename []string{"add", filepath.Join(completionTestSourcedir, "completion")}, completionTestFilename, false, }, { // Multiple positional filename (1 arg) []string{"add-multi", filepath.Join(completionTestSourcedir, "completion")}, completionTestFilename, false, }, { // Multiple positional filename (2 args) []string{"add-multi", filepath.Join(completionTestSourcedir, "completion.go"), filepath.Join(completionTestSourcedir, "completion")}, completionTestFilename, false, }, { // Multiple positional filename (3 args) []string{"add-multi", filepath.Join(completionTestSourcedir, "completion.go"), filepath.Join(completionTestSourcedir, "completion.go"), filepath.Join(completionTestSourcedir, "completion")}, completionTestFilename, false, }, { // Flag filename []string{"rm", "-f", path.Join(completionTestSourcedir, "completion")}, completionTestFilename, false, }, { // Flag short concat last filename []string{"rm", "-of", path.Join(completionTestSourcedir, "completion")}, completionTestFilename, false, }, { // Flag concat filename []string{"rm", "-f" + path.Join(completionTestSourcedir, "completion")}, []string{"-f" + completionTestFilename[0], "-f" + 
completionTestFilename[1]}, false, }, { // Flag equal concat filename []string{"rm", "-f=" + path.Join(completionTestSourcedir, "completion")}, []string{"-f=" + completionTestFilename[0], "-f=" + completionTestFilename[1]}, false, }, { // Flag concat long filename []string{"rm", "--filename=" + path.Join(completionTestSourcedir, "completion")}, []string{"--filename=" + completionTestFilename[0], "--filename=" + completionTestFilename[1]}, false, }, { // Flag long filename []string{"rm", "--filename", path.Join(completionTestSourcedir, "completion")}, completionTestFilename, false, }, { // To subdir []string{"rm", "--filename", path.Join(completionTestSourcedir, "examples/bash-")}, []string{path.Join(completionTestSourcedir, "examples/bash-completion/")}, false, }, { // Subdirectory []string{"rm", "--filename", path.Join(completionTestSourcedir, "examples") + "/"}, completionTestSubdir, false, }, { // Custom completed []string{"rename", "-c", "hello un"}, []string{"hello universe"}, false, }, { // Multiple flag filename []string{"add-multi-flag", "-f", filepath.Join(completionTestSourcedir, "completion")}, completionTestFilename, false, }, } } func TestCompletion(t *testing.T) { p := NewParser(&completionTestOptions, Default) c := &completion{parser: p} for _, test := range completionTests { if test.ShowDescriptions { continue } ret := c.complete(test.Args) items := make([]string, len(ret)) for i, v := range ret { items[i] = v.Item } if !reflect.DeepEqual(items, test.Completed) { t.Errorf("Args: %#v, %#v\n Expected: %#v\n Got: %#v", test.Args, test.ShowDescriptions, test.Completed, items) } } } func TestParserCompletion(t *testing.T) { for _, test := range completionTests { if test.ShowDescriptions { os.Setenv("GO_FLAGS_COMPLETION", "verbose") } else { os.Setenv("GO_FLAGS_COMPLETION", "1") } tmp := os.Stdout r, w, _ := os.Pipe() os.Stdout = w out := make(chan string) go func() { var buf bytes.Buffer io.Copy(&buf, r) out <- buf.String() }() p := NewParser(&completionTestOptions, None) p.CompletionHandler = func(items []Completion) { comp := &completion{parser: p} comp.print(items, test.ShowDescriptions) } _, err := p.ParseArgs(test.Args) w.Close() os.Stdout = tmp if err != nil { t.Fatalf("Unexpected error: %s", err) } got := strings.Split(strings.Trim(<-out, "\n"), "\n") if !reflect.DeepEqual(got, test.Completed) { t.Errorf("Expected: %#v\nGot: %#v", test.Completed, got) } } os.Setenv("GO_FLAGS_COMPLETION", "") } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/error.go0000644000000000000000000000576415024302467023467 0ustar rootrootpackage flags import ( "fmt" ) // ErrorType represents the type of error. type ErrorType uint const ( // ErrUnknown indicates a generic error. ErrUnknown ErrorType = iota // ErrExpectedArgument indicates that an argument was expected. ErrExpectedArgument // ErrUnknownFlag indicates an unknown flag. ErrUnknownFlag // ErrUnknownGroup indicates an unknown group. ErrUnknownGroup // ErrMarshal indicates a marshalling error while converting values. ErrMarshal // ErrHelp indicates that the built-in help was shown (the error // contains the help message). ErrHelp // ErrNoArgumentForBool indicates that an argument was given for a // boolean flag (which don't not take any arguments). ErrNoArgumentForBool // ErrRequired indicates that a required flag was not provided. ErrRequired // ErrShortNameTooLong indicates that a short flag name was specified, // longer than one character. 
ErrShortNameTooLong // ErrDuplicatedFlag indicates that a short or long flag has been // defined more than once ErrDuplicatedFlag // ErrTag indicates an error while parsing flag tags. ErrTag // ErrCommandRequired indicates that a command was required but not // specified ErrCommandRequired // ErrUnknownCommand indicates that an unknown command was specified. ErrUnknownCommand // ErrInvalidChoice indicates an invalid option value which only allows // a certain number of choices. ErrInvalidChoice // ErrInvalidTag indicates an invalid tag or invalid use of an existing tag ErrInvalidTag ) func (e ErrorType) String() string { switch e { case ErrUnknown: return "unknown" case ErrExpectedArgument: return "expected argument" case ErrUnknownFlag: return "unknown flag" case ErrUnknownGroup: return "unknown group" case ErrMarshal: return "marshal" case ErrHelp: return "help" case ErrNoArgumentForBool: return "no argument for bool" case ErrRequired: return "required" case ErrShortNameTooLong: return "short name too long" case ErrDuplicatedFlag: return "duplicated flag" case ErrTag: return "tag" case ErrCommandRequired: return "command required" case ErrUnknownCommand: return "unknown command" case ErrInvalidChoice: return "invalid choice" case ErrInvalidTag: return "invalid tag" } return "unrecognized error type" } func (e ErrorType) Error() string { return e.String() } // Error represents a parser error. The error returned from Parse is of this // type. The error contains both a Type and Message. type Error struct { // The type of error Type ErrorType // The error message Message string } // Error returns the error's message func (e *Error) Error() string { return e.Message } func newError(tp ErrorType, message string) *Error { return &Error{ Type: tp, Message: message, } } func newErrorf(tp ErrorType, format string, args ...interface{}) *Error { return newError(tp, fmt.Sprintf(format, args...)) } func wrapError(err error) *Error { ret, ok := err.(*Error) if !ok { return newError(ErrUnknown, err.Error()) } return ret } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/parser.go0000644000000000000000000004504015024302467023621 0ustar rootroot// Copyright 2012 Jesse van den Kieboom. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package flags import ( "bytes" "fmt" "os" "path" "reflect" "sort" "strings" "unicode/utf8" ) // A Parser provides command line option parsing. It can contain several // option groups each with their own set of options. type Parser struct { // Embedded, see Command for more information *Command // A usage string to be displayed in the help message. Usage string // Option flags changing the behavior of the parser. Options Options // NamespaceDelimiter separates group namespaces and option long names NamespaceDelimiter string // EnvNamespaceDelimiter separates group env namespaces and env keys EnvNamespaceDelimiter string // UnknownOptionsHandler is a function which gets called when the parser // encounters an unknown option. The function receives the unknown option // name, a SplitArgument which specifies its value if set with an argument // separator, and the remaining command line arguments. // It should return a new list of remaining arguments to continue parsing, // or an error to indicate a parse failure. UnknownOptionHandler func(option string, arg SplitArgument, args []string) ([]string, error) // CompletionHandler is a function gets called to handle the completion of // items. 
By default, the items are printed and the application is exited. // You can override this default behavior by specifying a custom CompletionHandler. CompletionHandler func(items []Completion) // CommandHandler is a function that gets called to handle execution of a // command. By default, the command will simply be executed. This can be // overridden to perform certain actions (such as applying global flags) // just before the command is executed. Note that if you override the // handler it is your responsibility to call the command.Execute function. // // The command passed into CommandHandler may be nil in case there is no // command to be executed when parsing has finished. CommandHandler func(command Commander, args []string) error internalError error } // SplitArgument represents the argument value of an option that was passed using // an argument separator. type SplitArgument interface { // String returns the option's value as a string, and a boolean indicating // if the option was present. Value() (string, bool) } type strArgument struct { value *string } func (s strArgument) Value() (string, bool) { if s.value == nil { return "", false } return *s.value, true } // Options provides parser options that change the behavior of the option // parser. type Options uint const ( // None indicates no options. None Options = 0 // HelpFlag adds a default Help Options group to the parser containing // -h and --help options. When either -h or --help is specified on the // command line, the parser will return the special error of type // ErrHelp. When PrintErrors is also specified, then the help message // will also be automatically printed to os.Stdout. HelpFlag = 1 << iota // PassDoubleDash passes all arguments after a double dash, --, as // remaining command line arguments (i.e. they will not be parsed for // flags). PassDoubleDash // IgnoreUnknown ignores any unknown options and passes them as // remaining command line arguments instead of generating an error. IgnoreUnknown // PrintErrors prints any errors which occurred during parsing to // os.Stderr. In the special case of ErrHelp, the message will be printed // to os.Stdout. PrintErrors // PassAfterNonOption passes all arguments after the first non option // as remaining command line arguments. This is equivalent to strict // POSIX processing. PassAfterNonOption // AllowBoolValues allows a user to assign true/false to a boolean value // rather than raising an error stating it cannot have an argument. AllowBoolValues // Default is a convenient default set of options which should cover // most of the uses of the flags package. Default = HelpFlag | PrintErrors | PassDoubleDash ) type parseState struct { arg string args []string retargs []string positional []*Arg err error command *Command lookup lookup } // Parse is a convenience function to parse command line options with default // settings. The provided data is a pointer to a struct representing the // default option group (named "Application Options"). For more control, use // flags.NewParser. func Parse(data interface{}) ([]string, error) { return NewParser(data, Default).Parse() } // ParseArgs is a convenience function to parse command line options with default // settings. The provided data is a pointer to a struct representing the // default option group (named "Application Options"). The args argument is // the list of command line arguments to parse. If you just want to parse the // default program command line arguments (i.e. os.Args), then use flags.Parse // instead. 
For more control, use flags.NewParser. func ParseArgs(data interface{}, args []string) ([]string, error) { return NewParser(data, Default).ParseArgs(args) } // NewParser creates a new parser. It uses os.Args[0] as the application // name and then calls Parser.NewNamedParser (see Parser.NewNamedParser for // more details). The provided data is a pointer to a struct representing the // default option group (named "Application Options"), or nil if the default // group should not be added. The options parameter specifies a set of options // for the parser. func NewParser(data interface{}, options Options) *Parser { p := NewNamedParser(path.Base(os.Args[0]), options) if data != nil { g, err := p.AddGroup("Application Options", "", data) if err == nil { g.parent = p } p.internalError = err } return p } // NewNamedParser creates a new parser. The appname is used to display the // executable name in the built-in help message. Option groups and commands can // be added to this parser by using AddGroup and AddCommand. func NewNamedParser(appname string, options Options) *Parser { p := &Parser{ Command: newCommand(appname, "", "", nil), Options: options, NamespaceDelimiter: ".", EnvNamespaceDelimiter: "_", } p.Command.parent = p return p } // Parse parses the command line arguments from os.Args using Parser.ParseArgs. // For more detailed information see ParseArgs. func (p *Parser) Parse() ([]string, error) { return p.ParseArgs(os.Args[1:]) } // ParseArgs parses the command line arguments according to the option groups that // were added to the parser. On successful parsing of the arguments, the // remaining, non-option, arguments (if any) are returned. The returned error // indicates a parsing error and can be used with PrintError to display // contextual information on where the error occurred exactly. // // When the common help group has been added (AddHelp) and either -h or --help // was specified in the command line arguments, a help message will be // automatically printed if the PrintErrors option is enabled. // Furthermore, the special error type ErrHelp is returned. // It is up to the caller to exit the program if so desired. func (p *Parser) ParseArgs(args []string) ([]string, error) { if p.internalError != nil { return nil, p.internalError } p.eachOption(func(c *Command, g *Group, option *Option) { option.clearReferenceBeforeSet = true option.updateDefaultLiteral() }) // Add built-in help group to all commands if necessary if (p.Options & HelpFlag) != None { p.addHelpGroups(p.showBuiltinHelp) } compval := os.Getenv("GO_FLAGS_COMPLETION") if len(compval) != 0 { comp := &completion{parser: p} items := comp.complete(args) if p.CompletionHandler != nil { p.CompletionHandler(items) } else { comp.print(items, compval == "verbose") os.Exit(0) } return nil, nil } s := &parseState{ args: args, retargs: make([]string, 0, len(args)), } p.fillParseState(s) for !s.eof() { var err error arg := s.pop() // When PassDoubleDash is set and we encounter a --, then // simply append all the rest as arguments and break out if (p.Options&PassDoubleDash) != None && arg == "--" { s.addArgs(s.args...) 
break } if !argumentIsOption(arg) { if ((p.Options&PassAfterNonOption) != None || s.command.PassAfterNonOption) && s.lookup.commands[arg] == nil { // If PassAfterNonOption is set then all remaining arguments // are considered positional if err = s.addArgs(s.arg); err != nil { break } if err = s.addArgs(s.args...); err != nil { break } break } // Note: this also sets s.err, so we can just check for // nil here and use s.err later if p.parseNonOption(s) != nil { break } continue } prefix, optname, islong := stripOptionPrefix(arg) optname, _, argument := splitOption(prefix, optname, islong) if islong { err = p.parseLong(s, optname, argument) } else { err = p.parseShort(s, optname, argument) } if err != nil { ignoreUnknown := (p.Options & IgnoreUnknown) != None parseErr := wrapError(err) if parseErr.Type != ErrUnknownFlag || (!ignoreUnknown && p.UnknownOptionHandler == nil) { s.err = parseErr break } if ignoreUnknown { s.addArgs(arg) } else if p.UnknownOptionHandler != nil { modifiedArgs, err := p.UnknownOptionHandler(optname, strArgument{argument}, s.args) if err != nil { s.err = err break } s.args = modifiedArgs } } } if s.err == nil { p.eachOption(func(c *Command, g *Group, option *Option) { err := option.clearDefault() if err != nil { if _, ok := err.(*Error); !ok { err = p.marshalError(option, err) } s.err = err } }) s.checkRequired(p) } var reterr error if s.err != nil { reterr = s.err } else if len(s.command.commands) != 0 && !s.command.SubcommandsOptional { reterr = s.estimateCommand() } else if cmd, ok := s.command.data.(Commander); ok { if p.CommandHandler != nil { reterr = p.CommandHandler(cmd, s.retargs) } else { reterr = cmd.Execute(s.retargs) } } else if p.CommandHandler != nil { reterr = p.CommandHandler(nil, s.retargs) } if reterr != nil { var retargs []string if ourErr, ok := reterr.(*Error); !ok || ourErr.Type != ErrHelp { retargs = append([]string{s.arg}, s.args...) 
} else { retargs = s.args } return retargs, p.printError(reterr) } return s.retargs, nil } func (p *parseState) eof() bool { return len(p.args) == 0 } func (p *parseState) pop() string { if p.eof() { return "" } p.arg = p.args[0] p.args = p.args[1:] return p.arg } func (p *parseState) peek() string { if p.eof() { return "" } return p.args[0] } func (p *parseState) checkRequired(parser *Parser) error { c := parser.Command var required []*Option for c != nil { c.eachGroup(func(g *Group) { for _, option := range g.options { if !option.isSet && option.Required { required = append(required, option) } } }) c = c.Active } if len(required) == 0 { if len(p.positional) > 0 { var reqnames []string for _, arg := range p.positional { argRequired := (!arg.isRemaining() && p.command.ArgsRequired) || arg.Required != -1 || arg.RequiredMaximum != -1 if !argRequired { continue } if arg.isRemaining() { if arg.value.Len() < arg.Required { var arguments string if arg.Required > 1 { arguments = "arguments, but got only " + fmt.Sprintf("%d", arg.value.Len()) } else { arguments = "argument" } reqnames = append(reqnames, "`"+arg.Name+" (at least "+fmt.Sprintf("%d", arg.Required)+" "+arguments+")`") } else if arg.RequiredMaximum != -1 && arg.value.Len() > arg.RequiredMaximum { if arg.RequiredMaximum == 0 { reqnames = append(reqnames, "`"+arg.Name+" (zero arguments)`") } else { var arguments string if arg.RequiredMaximum > 1 { arguments = "arguments, but got " + fmt.Sprintf("%d", arg.value.Len()) } else { arguments = "argument" } reqnames = append(reqnames, "`"+arg.Name+" (at most "+fmt.Sprintf("%d", arg.RequiredMaximum)+" "+arguments+")`") } } } else { reqnames = append(reqnames, "`"+arg.Name+"`") } } if len(reqnames) == 0 { return nil } var msg string if len(reqnames) == 1 { msg = fmt.Sprintf("the required argument %s was not provided", reqnames[0]) } else { msg = fmt.Sprintf("the required arguments %s and %s were not provided", strings.Join(reqnames[:len(reqnames)-1], ", "), reqnames[len(reqnames)-1]) } p.err = newError(ErrRequired, msg) return p.err } return nil } names := make([]string, 0, len(required)) for _, k := range required { names = append(names, "`"+k.String()+"'") } sort.Strings(names) var msg string if len(names) == 1 { msg = fmt.Sprintf("the required flag %s was not specified", names[0]) } else { msg = fmt.Sprintf("the required flags %s and %s were not specified", strings.Join(names[:len(names)-1], ", "), names[len(names)-1]) } p.err = newError(ErrRequired, msg) return p.err } func (p *parseState) estimateCommand() error { commands := p.command.sortedVisibleCommands() cmdnames := make([]string, len(commands)) for i, v := range commands { cmdnames[i] = v.Name } var msg string var errtype ErrorType if len(p.retargs) != 0 { c, l := closestChoice(p.retargs[0], cmdnames) msg = fmt.Sprintf("Unknown command `%s'", p.retargs[0]) errtype = ErrUnknownCommand if float32(l)/float32(len(c)) < 0.5 { msg = fmt.Sprintf("%s, did you mean `%s'?", msg, c) } else if len(cmdnames) == 1 { msg = fmt.Sprintf("%s. You should use the %s command", msg, cmdnames[0]) } else if len(cmdnames) > 1 { msg = fmt.Sprintf("%s. 
Please specify one command of: %s or %s", msg, strings.Join(cmdnames[:len(cmdnames)-1], ", "), cmdnames[len(cmdnames)-1]) } } else { errtype = ErrCommandRequired if len(cmdnames) == 1 { msg = fmt.Sprintf("Please specify the %s command", cmdnames[0]) } else if len(cmdnames) > 1 { msg = fmt.Sprintf("Please specify one command of: %s or %s", strings.Join(cmdnames[:len(cmdnames)-1], ", "), cmdnames[len(cmdnames)-1]) } } return newError(errtype, msg) } func (p *Parser) parseOption(s *parseState, name string, option *Option, canarg bool, argument *string) (err error) { if !option.canArgument() { if argument != nil && (p.Options&AllowBoolValues) == None { return newErrorf(ErrNoArgumentForBool, "bool flag `%s' cannot have an argument", option) } err = option.Set(argument) } else if argument != nil || (canarg && !s.eof()) { var arg string if argument != nil { arg = *argument } else { arg = s.pop() if validationErr := option.isValidValue(arg); validationErr != nil { return newErrorf(ErrExpectedArgument, validationErr.Error()) } else if p.Options&PassDoubleDash != 0 && arg == "--" { return newErrorf(ErrExpectedArgument, "expected argument for flag `%s', but got double dash `--'", option) } } if option.tag.Get("unquote") != "false" { arg, err = unquoteIfPossible(arg) } if err == nil { err = option.Set(&arg) } } else if option.OptionalArgument { option.empty() for _, v := range option.OptionalValue { err = option.Set(&v) if err != nil { break } } } else { err = newErrorf(ErrExpectedArgument, "expected argument for flag `%s'", option) } if err != nil { if _, ok := err.(*Error); !ok { err = p.marshalError(option, err) } } return err } func (p *Parser) marshalError(option *Option, err error) *Error { s := "invalid argument for flag `%s'" expected := p.expectedType(option) if expected != "" { s = s + " (expected " + expected + ")" } return newErrorf(ErrMarshal, s+": %s", option, err.Error()) } func (p *Parser) expectedType(option *Option) string { valueType := option.value.Type() if valueType.Kind() == reflect.Func { return "" } return valueType.String() } func (p *Parser) parseLong(s *parseState, name string, argument *string) error { if option := s.lookup.longNames[name]; option != nil { // Only long options that are required can consume an argument // from the argument list canarg := !option.OptionalArgument return p.parseOption(s, name, option, canarg, argument) } return newErrorf(ErrUnknownFlag, "unknown flag `%s'", name) } func (p *Parser) splitShortConcatArg(s *parseState, optname string) (string, *string) { c, n := utf8.DecodeRuneInString(optname) if n == len(optname) { return optname, nil } first := string(c) if option := s.lookup.shortNames[first]; option != nil && option.canArgument() { arg := optname[n:] return first, &arg } return optname, nil } func (p *Parser) parseShort(s *parseState, optname string, argument *string) error { if argument == nil { optname, argument = p.splitShortConcatArg(s, optname) } for i, c := range optname { shortname := string(c) if option := s.lookup.shortNames[shortname]; option != nil { // Only the last short argument can consume an argument from // the arguments list, and only if it's non optional canarg := (i+utf8.RuneLen(c) == len(optname)) && !option.OptionalArgument if err := p.parseOption(s, shortname, option, canarg, argument); err != nil { return err } } else { return newErrorf(ErrUnknownFlag, "unknown flag `%s'", shortname) } // Only the first option can have a concatted argument, so just // clear argument here argument = nil } return nil } func (p 
*parseState) addArgs(args ...string) error { for len(p.positional) > 0 && len(args) > 0 { arg := p.positional[0] if err := convert(args[0], arg.value, arg.tag); err != nil { p.err = err return err } if !arg.isRemaining() { p.positional = p.positional[1:] } args = args[1:] } p.retargs = append(p.retargs, args...) return nil } func (p *Parser) parseNonOption(s *parseState) error { if len(s.positional) > 0 { return s.addArgs(s.arg) } if len(s.command.commands) > 0 && len(s.retargs) == 0 { if cmd := s.lookup.commands[s.arg]; cmd != nil { s.command.Active = cmd cmd.fillParseState(s) return nil } else if !s.command.SubcommandsOptional { s.addArgs(s.arg) return newErrorf(ErrUnknownCommand, "Unknown command `%s'", s.arg) } } return s.addArgs(s.arg) } func (p *Parser) showBuiltinHelp() error { var b bytes.Buffer p.WriteHelp(&b) return newError(ErrHelp, b.String()) } func (p *Parser) printError(err error) error { if err != nil && (p.Options&PrintErrors) != None { flagsErr, ok := err.(*Error) if ok && flagsErr.Type == ErrHelp { fmt.Fprintln(os.Stdout, err) } else { fmt.Fprintln(os.Stderr, err) } } return err } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/check_crosscompile.sh0000644000000000000000000000071515024302467026171 0ustar rootroot#!/bin/bash set -e echo '# linux arm7' GOARM=7 GOARCH=arm GOOS=linux go build echo '# linux arm5' GOARM=5 GOARCH=arm GOOS=linux go build echo '# windows 386' GOARCH=386 GOOS=windows go build echo '# windows amd64' GOARCH=amd64 GOOS=windows go build echo '# darwin' GOARCH=amd64 GOOS=darwin go build echo '# freebsd' GOARCH=amd64 GOOS=freebsd go build echo '# aix ppc64' GOARCH=ppc64 GOOS=aix go build echo '# solaris amd64' GOARCH=amd64 GOOS=solaris go build dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/options_test.go0000644000000000000000000000542715024302467025064 0ustar rootrootpackage flags import ( "strings" "testing" ) func TestPassDoubleDash(t *testing.T) { var opts = struct { Value bool `short:"v"` }{} p := NewParser(&opts, PassDoubleDash) ret, err := p.ParseArgs([]string{"-v", "--", "-v", "-g"}) if err != nil { t.Fatalf("Unexpected error: %v", err) return } if !opts.Value { t.Errorf("Expected Value to be true") } assertStringArray(t, ret, []string{"-v", "-g"}) } func TestPassAfterNonOption(t *testing.T) { var opts = struct { Value bool `short:"v"` }{} p := NewParser(&opts, PassAfterNonOption) ret, err := p.ParseArgs([]string{"-v", "arg", "-v", "-g"}) if err != nil { t.Fatalf("Unexpected error: %v", err) return } if !opts.Value { t.Errorf("Expected Value to be true") } assertStringArray(t, ret, []string{"arg", "-v", "-g"}) } func TestPassAfterNonOptionWithPositional(t *testing.T) { var opts = struct { Value bool `short:"v"` Positional struct { Rest []string `required:"yes"` } `positional-args:"yes"` }{} p := NewParser(&opts, PassAfterNonOption) ret, err := p.ParseArgs([]string{"-v", "arg", "-v", "-g"}) if err != nil { t.Fatalf("Unexpected error: %v", err) return } if !opts.Value { t.Errorf("Expected Value to be true") } assertStringArray(t, ret, []string{}) assertStringArray(t, opts.Positional.Rest, []string{"arg", "-v", "-g"}) } func TestPassAfterNonOptionWithPositionalIntPass(t *testing.T) { var opts = struct { Value bool `short:"v"` Positional struct { Rest []int `required:"yes"` } `positional-args:"yes"` }{} p := NewParser(&opts, PassAfterNonOption) ret, err := p.ParseArgs([]string{"-v", "1", "2", "3"}) if err != nil { t.Fatalf("Unexpected error: %v", err) return } if !opts.Value { t.Errorf("Expected Value to be true") } 
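// With PassAfterNonOption, every argument after -v is consumed by the positional Rest slice, so ParseArgs returns no leftover arguments.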
assertStringArray(t, ret, []string{}) for i, rest := range opts.Positional.Rest { if rest != i+1 { assertErrorf(t, "Expected %v got %v", i+1, rest) } } } func TestPassAfterNonOptionWithPositionalIntFail(t *testing.T) { var opts = struct { Value bool `short:"v"` Positional struct { Rest []int `required:"yes"` } `positional-args:"yes"` }{} tests := []struct { opts []string errContains string ret []string }{ { []string{"-v", "notint1", "notint2", "notint3"}, "notint1", []string{"notint1", "notint2", "notint3"}, }, { []string{"-v", "1", "notint2", "notint3"}, "notint2", []string{"1", "notint2", "notint3"}, }, } for _, test := range tests { p := NewParser(&opts, PassAfterNonOption) ret, err := p.ParseArgs(test.opts) if err == nil { assertErrorf(t, "Expected error") return } if !strings.Contains(err.Error(), test.errContains) { assertErrorf(t, "Expected the first illegal argument in the error") } assertStringArray(t, ret, test.ret) } } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/.github/0000755000000000000000000000000015024302467023333 5ustar rootrootdependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/.github/workflows/0000755000000000000000000000000015024302467025370 5ustar rootrootdependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/.github/workflows/go.yml0000644000000000000000000000121415024302467026516 0ustar rootroot# This workflow will build a golang project # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go name: Go on: push: branches: ["main"] pull_request: branches: ["main"] jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: setup uses: actions/setup-go@v4 with: go-version: "1.20" - name: build run: go build -v ./... - name: test run: go test -v ./... - name: gofmt run: exit $(gofmt -l . | wc -l) - name: vet run: go vet -all=true -v=true . 
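// Illustrative sketch, not part of the upstream go-flags sources: a minimal
// program demonstrating the short-flag behaviour that short_test.go (the next
// file) exercises, namely concatenated boolean flags ("-vo") and a short
// option with an attached argument ("-nvalue"). Only API that appears in this
// module (ParseArgs and the `short` struct tag) is used; the option names
// themselves are invented for the example.
package main

import (
	"fmt"

	flags "github.com/jessevdk/go-flags"
)

func main() {
	var opts struct {
		Verbose bool   `short:"v"`
		Output  bool   `short:"o"`
		Name    string `short:"n"`
	}

	// "-vo" switches on both booleans; "-nvalue" attaches "value" to -n.
	rest, err := flags.ParseArgs(&opts, []string{"-vo", "-nvalue", "positional"})
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}

	// Prints: true true value [positional]
	fmt.Println(opts.Verbose, opts.Output, opts.Name, rest)
}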
dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/short_test.go0000644000000000000000000001152315024302467024522 0ustar rootrootpackage flags import ( "fmt" "testing" ) func TestShort(t *testing.T) { var opts = struct { Value bool `short:"v"` }{} ret := assertParseSuccess(t, &opts, "-v") assertStringArray(t, ret, []string{}) if !opts.Value { t.Errorf("Expected Value to be true") } } func TestShortTooLong(t *testing.T) { var opts = struct { Value bool `short:"vv"` }{} assertParseFail(t, ErrShortNameTooLong, "short names can only be 1 character long, not `vv'", &opts) } func TestShortRequired(t *testing.T) { var opts = struct { Value bool `short:"v" required:"true"` }{} assertParseFail(t, ErrRequired, fmt.Sprintf("the required flag `%cv' was not specified", defaultShortOptDelimiter), &opts) } func TestShortRequiredFalsy1(t *testing.T) { var opts = struct { Value bool `short:"v" required:"false"` }{} assertParseSuccess(t, &opts) } func TestShortRequiredFalsy2(t *testing.T) { var opts = struct { Value bool `short:"v" required:"no"` }{} assertParseSuccess(t, &opts) } func TestShortMultiConcat(t *testing.T) { var opts = struct { V bool `short:"v"` O bool `short:"o"` F bool `short:"f"` }{} ret := assertParseSuccess(t, &opts, "-vo", "-f") assertStringArray(t, ret, []string{}) if !opts.V { t.Errorf("Expected V to be true") } if !opts.O { t.Errorf("Expected O to be true") } if !opts.F { t.Errorf("Expected F to be true") } } func TestShortMultiRequiredConcat(t *testing.T) { var opts = struct { V bool `short:"v" required:"true"` O bool `short:"o" required:"true"` F bool `short:"f" required:"true"` }{} ret := assertParseSuccess(t, &opts, "-vo", "-f") assertStringArray(t, ret, []string{}) if !opts.V { t.Errorf("Expected V to be true") } if !opts.O { t.Errorf("Expected O to be true") } if !opts.F { t.Errorf("Expected F to be true") } } func TestShortMultiSlice(t *testing.T) { var opts = struct { Values []bool `short:"v"` }{} ret := assertParseSuccess(t, &opts, "-v", "-v") assertStringArray(t, ret, []string{}) assertBoolArray(t, opts.Values, []bool{true, true}) } func TestShortMultiSliceConcat(t *testing.T) { var opts = struct { Values []bool `short:"v"` }{} ret := assertParseSuccess(t, &opts, "-vvv") assertStringArray(t, ret, []string{}) assertBoolArray(t, opts.Values, []bool{true, true, true}) } func TestShortWithEqualArg(t *testing.T) { var opts = struct { Value string `short:"v"` }{} ret := assertParseSuccess(t, &opts, "-v=value") assertStringArray(t, ret, []string{}) assertString(t, opts.Value, "value") } func TestShortWithArg(t *testing.T) { var opts = struct { Value string `short:"v"` }{} ret := assertParseSuccess(t, &opts, "-vvalue") assertStringArray(t, ret, []string{}) assertString(t, opts.Value, "value") } func TestShortArg(t *testing.T) { var opts = struct { Value string `short:"v"` }{} ret := assertParseSuccess(t, &opts, "-v", "value") assertStringArray(t, ret, []string{}) assertString(t, opts.Value, "value") } func TestShortMultiWithEqualArg(t *testing.T) { var opts = struct { F []bool `short:"f"` Value string `short:"v"` }{} assertParseFail(t, ErrExpectedArgument, fmt.Sprintf("expected argument for flag `%cv'", defaultShortOptDelimiter), &opts, "-ffv=value") } func TestShortMultiArg(t *testing.T) { var opts = struct { F []bool `short:"f"` Value string `short:"v"` }{} ret := assertParseSuccess(t, &opts, "-ffv", "value") assertStringArray(t, ret, []string{}) assertBoolArray(t, opts.F, []bool{true, true}) assertString(t, opts.Value, "value") } func TestShortMultiArgConcatFail(t 
*testing.T) { var opts = struct { F []bool `short:"f"` Value string `short:"v"` }{} assertParseFail(t, ErrExpectedArgument, fmt.Sprintf("expected argument for flag `%cv'", defaultShortOptDelimiter), &opts, "-ffvvalue") } func TestShortMultiArgConcat(t *testing.T) { var opts = struct { F []bool `short:"f"` Value string `short:"v"` }{} ret := assertParseSuccess(t, &opts, "-vff") assertStringArray(t, ret, []string{}) assertString(t, opts.Value, "ff") } func TestShortOptional(t *testing.T) { var opts = struct { F []bool `short:"f"` Value string `short:"v" optional:"yes" optional-value:"value"` }{} ret := assertParseSuccess(t, &opts, "-fv", "f") assertStringArray(t, ret, []string{"f"}) assertString(t, opts.Value, "value") } func TestShortOptionalFalsy1(t *testing.T) { var opts = struct { F []bool `short:"f"` Value string `short:"v" optional:"false" optional-value:"value"` }{} ret := assertParseSuccess(t, &opts, "-fv", "f") assertStringArray(t, ret, []string{}) assertString(t, opts.Value, "f") } func TestShortOptionalFalsy2(t *testing.T) { var opts = struct { F []bool `short:"f"` Value string `short:"v" optional:"no" optional-value:"value"` }{} ret := assertParseSuccess(t, &opts, "-fv", "f") assertStringArray(t, ret, []string{}) assertString(t, opts.Value, "f") } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/termsize_windows.go0000644000000000000000000000333115024302467025736 0ustar rootroot//go:build windows // +build windows package flags import ( "syscall" "unsafe" ) type ( SHORT int16 WORD uint16 SMALL_RECT struct { Left SHORT Top SHORT Right SHORT Bottom SHORT } COORD struct { X SHORT Y SHORT } CONSOLE_SCREEN_BUFFER_INFO struct { Size COORD CursorPosition COORD Attributes WORD Window SMALL_RECT MaximumWindowSize COORD } ) var kernel32DLL = syscall.NewLazyDLL("kernel32.dll") var getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") func getError(r1, r2 uintptr, lastErr error) error { // If the function fails, the return value is zero. if r1 == 0 { if lastErr != nil { return lastErr } return syscall.EINVAL } return nil } func getStdHandle(stdhandle int) (uintptr, error) { handle, err := syscall.GetStdHandle(stdhandle) if err != nil { return 0, err } return uintptr(handle), nil } // GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer. 
// http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { var info CONSOLE_SCREEN_BUFFER_INFO if err := getError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0)); err != nil { return nil, err } return &info, nil } func getTerminalColumns() int { defaultWidth := 80 stdoutHandle, err := getStdHandle(syscall.STD_OUTPUT_HANDLE) if err != nil { return defaultWidth } info, err := GetConsoleScreenBufferInfo(stdoutHandle) if err != nil { return defaultWidth } if info.MaximumWindowSize.X > 0 { return int(info.MaximumWindowSize.X) } return defaultWidth } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/parser_test.go0000644000000000000000000004726615024302467024674 0ustar rootrootpackage flags import ( "errors" "fmt" "os" "reflect" "runtime" "strconv" "strings" "testing" "time" ) type defaultOptions struct { Int int `long:"i"` IntDefault int `long:"id" default:"1"` IntUnderscore int `long:"idu" default:"1_0"` Float64 float64 `long:"f"` Float64Default float64 `long:"fd" default:"-3.14"` Float64Underscore float64 `long:"fdu" default:"-3_3.14"` NumericFlag bool `short:"3"` String string `long:"str"` StringDefault string `long:"strd" default:"abc"` StringNotUnquoted string `long:"strnot" unquote:"false"` Time time.Duration `long:"t"` TimeDefault time.Duration `long:"td" default:"1m"` Map map[string]int `long:"m"` MapDefault map[string]int `long:"md" default:"a:1"` Slice []int `long:"s"` SliceDefault []int `long:"sd" default:"1" default:"2"` } func TestDefaults(t *testing.T) { var tests = []struct { msg string args []string expected defaultOptions expectedErr string }{ { msg: "no arguments, expecting default values", args: []string{}, expected: defaultOptions{ Int: 0, IntDefault: 1, IntUnderscore: 10, Float64: 0.0, Float64Default: -3.14, Float64Underscore: -33.14, NumericFlag: false, String: "", StringDefault: "abc", Time: 0, TimeDefault: time.Minute, Map: map[string]int{}, MapDefault: map[string]int{"a": 1}, Slice: []int{}, SliceDefault: []int{1, 2}, }, }, { msg: "non-zero value arguments, expecting overwritten arguments", args: []string{"--i=3", "--id=3", "--idu=3_3", "--f=-2.71", "--fd=2.71", "--fdu=2_2.71", "-3", "--str=def", "--strd=def", "--t=3ms", "--td=3ms", "--m=c:3", "--md=c:3", "--s=3", "--sd=3"}, expected: defaultOptions{ Int: 3, IntDefault: 3, IntUnderscore: 33, Float64: -2.71, Float64Default: 2.71, Float64Underscore: 22.71, NumericFlag: true, String: "def", StringDefault: "def", Time: 3 * time.Millisecond, TimeDefault: 3 * time.Millisecond, Map: map[string]int{"c": 3}, MapDefault: map[string]int{"c": 3}, Slice: []int{3}, SliceDefault: []int{3}, }, }, { msg: "non-zero value arguments, expecting overwritten arguments", args: []string{"-3=true"}, expectedErr: "bool flag `-3' cannot have an argument", }, { msg: "zero value arguments, expecting overwritten arguments", args: []string{"--i=0", "--id=0", "--idu=0", "--f=0", "--fd=0", "--fdu=0", "--str", "", "--strd=\"\"", "--t=0ms", "--td=0s", "--m=:0", "--md=:0", "--s=0", "--sd=0"}, expected: defaultOptions{ Int: 0, IntDefault: 0, IntUnderscore: 0, Float64: 0, Float64Default: 0, Float64Underscore: 0, String: "", StringDefault: "", Time: 0, TimeDefault: 0, Map: map[string]int{"": 0}, MapDefault: map[string]int{"": 0}, Slice: []int{0}, SliceDefault: []int{0}, }, }, } for _, test := range tests { var opts defaultOptions _, err := ParseArgs(&opts, test.args) if test.expectedErr != "" { if err == nil { 
t.Errorf("%s:\nExpected error containing substring %q", test.msg, test.expectedErr) } else if !strings.Contains(err.Error(), test.expectedErr) { t.Errorf("%s:\nExpected error %q to contain substring %q", test.msg, err, test.expectedErr) } } else { if err != nil { t.Fatalf("%s:\nUnexpected error: %v", test.msg, err) } if opts.Slice == nil { opts.Slice = []int{} } if !reflect.DeepEqual(opts, test.expected) { t.Errorf("%s:\nUnexpected options with arguments %+v\nexpected\n%+v\nbut got\n%+v\n", test.msg, test.args, test.expected, opts) } } } } func TestNoDefaultsForBools(t *testing.T) { var opts struct { DefaultBool bool `short:"d" default:"true"` } if runtime.GOOS == "windows" { assertParseFail(t, ErrInvalidTag, "boolean flag `/d' may not have default values, they always default to `false' and can only be turned on", &opts) } else { assertParseFail(t, ErrInvalidTag, "boolean flag `-d' may not have default values, they always default to `false' and can only be turned on", &opts) } } func TestUnquoting(t *testing.T) { var tests = []struct { arg string err error value string }{ { arg: "\"abc", err: strconv.ErrSyntax, value: "", }, { arg: "\"\"abc\"", err: strconv.ErrSyntax, value: "", }, { arg: "\"abc\"", err: nil, value: "abc", }, { arg: "\"\\\"abc\\\"\"", err: nil, value: "\"abc\"", }, { arg: "\"\\\"abc\"", err: nil, value: "\"abc", }, } for _, test := range tests { var opts defaultOptions for _, delimiter := range []bool{false, true} { p := NewParser(&opts, None) var err error if delimiter { _, err = p.ParseArgs([]string{"--str=" + test.arg, "--strnot=" + test.arg}) } else { _, err = p.ParseArgs([]string{"--str", test.arg, "--strnot", test.arg}) } if test.err == nil { if err != nil { t.Fatalf("Expected no error but got: %v", err) } if test.value != opts.String { t.Fatalf("Expected String to be %q but got %q", test.value, opts.String) } if q := strconv.Quote(test.value); q != opts.StringNotUnquoted { t.Fatalf("Expected StringDefault to be %q but got %q", q, opts.StringNotUnquoted) } } else { if err == nil { t.Fatalf("Expected error") } else if e, ok := err.(*Error); ok { if strings.HasPrefix(e.Message, test.err.Error()) { t.Fatalf("Expected error message to end with %q but got %v", test.err.Error(), e.Message) } } } } } } // EnvRestorer keeps a copy of a set of env variables and can restore the env from them type EnvRestorer struct { env map[string]string } func (r *EnvRestorer) Restore() { os.Clearenv() for k, v := range r.env { os.Setenv(k, v) } } // EnvSnapshot returns a snapshot of the currently set env variables func EnvSnapshot() *EnvRestorer { r := EnvRestorer{make(map[string]string)} for _, kv := range os.Environ() { parts := strings.SplitN(kv, "=", 2) if len(parts) != 2 { panic("got a weird env variable: " + kv) } r.env[parts[0]] = parts[1] } return &r } type envNestedOptions struct { Foo string `long:"foo" default:"z" env:"FOO"` } type envDefaultOptions struct { Int int `long:"i" default:"1" env:"TEST_I"` Time time.Duration `long:"t" default:"1m" env:"TEST_T"` Map map[string]int `long:"m" default:"a:1" env:"TEST_M" env-delim:";"` Slice []int `long:"s" default:"1" default:"2" env:"TEST_S" env-delim:","` Nested envNestedOptions `group:"nested" namespace:"nested" env-namespace:"NESTED"` } func TestEnvDefaults(t *testing.T) { var tests = []struct { msg string args []string expected envDefaultOptions expectedErr string env map[string]string }{ { msg: "no arguments, no env, expecting default values", args: []string{}, expected: envDefaultOptions{ Int: 1, Time: time.Minute, Map: 
map[string]int{"a": 1}, Slice: []int{1, 2}, Nested: envNestedOptions{ Foo: "z", }, }, }, { msg: "no arguments, env defaults, expecting env default values", args: []string{}, expected: envDefaultOptions{ Int: 2, Time: 2 * time.Minute, Map: map[string]int{"a": 2, "b": 3}, Slice: []int{4, 5, 6}, Nested: envNestedOptions{ Foo: "a", }, }, env: map[string]string{ "TEST_I": "2", "TEST_T": "2m", "TEST_M": "a:2;b:3", "TEST_S": "4,5,6", "NESTED_FOO": "a", }, }, { msg: "no arguments, malformed env defaults, expecting parse error", args: []string{}, expectedErr: `parsing "two": invalid syntax`, env: map[string]string{ "TEST_I": "two", }, }, { msg: "non-zero value arguments, expecting overwritten arguments", args: []string{"--i=3", "--t=3ms", "--m=c:3", "--s=3", "--nested.foo=\"p\""}, expected: envDefaultOptions{ Int: 3, Time: 3 * time.Millisecond, Map: map[string]int{"c": 3}, Slice: []int{3}, Nested: envNestedOptions{ Foo: "p", }, }, env: map[string]string{ "TEST_I": "2", "TEST_T": "2m", "TEST_M": "a:2;b:3", "TEST_S": "4,5,6", "NESTED_FOO": "a", }, }, { msg: "zero value arguments, expecting overwritten arguments", args: []string{"--i=0", "--t=0ms", "--m=:0", "--s=0", "--nested.foo=\"\""}, expected: envDefaultOptions{ Int: 0, Time: 0, Map: map[string]int{"": 0}, Slice: []int{0}, Nested: envNestedOptions{ Foo: "", }, }, env: map[string]string{ "TEST_I": "2", "TEST_T": "2m", "TEST_M": "a:2;b:3", "TEST_S": "4,5,6", "NESTED_FOO": "a", }, }, } oldEnv := EnvSnapshot() defer oldEnv.Restore() for _, test := range tests { var opts envDefaultOptions oldEnv.Restore() for envKey, envValue := range test.env { os.Setenv(envKey, envValue) } _, err := NewParser(&opts, None).ParseArgs(test.args) if test.expectedErr != "" { if err == nil { t.Errorf("%s:\nExpected error containing substring %q", test.msg, test.expectedErr) } else if !strings.Contains(err.Error(), test.expectedErr) { t.Errorf("%s:\nExpected error %q to contain substring %q", test.msg, err, test.expectedErr) } } else { if err != nil { t.Fatalf("%s:\nUnexpected error: %v", test.msg, err) } if opts.Slice == nil { opts.Slice = []int{} } if !reflect.DeepEqual(opts, test.expected) { t.Errorf("%s:\nUnexpected options with arguments %+v\nexpected\n%+v\nbut got\n%+v\n", test.msg, test.args, test.expected, opts) } } } } type CustomFlag struct { Value string } func (c *CustomFlag) UnmarshalFlag(s string) error { c.Value = s return nil } func (c *CustomFlag) IsValidValue(s string) error { if !(s == "-1" || s == "-foo") { return errors.New("invalid flag value") } return nil } func TestOptionAsArgument(t *testing.T) { var tests = []struct { args []string expectError bool errType ErrorType errMsg string rest []string }{ { // short option must not be accepted as argument args: []string{"--string-slice", "foobar", "--string-slice", "-o"}, expectError: true, errType: ErrExpectedArgument, errMsg: "expected argument for flag `" + defaultLongOptDelimiter + "string-slice', but got option `-o'", }, { // long option must not be accepted as argument args: []string{"--string-slice", "foobar", "--string-slice", "--other-option"}, expectError: true, errType: ErrExpectedArgument, errMsg: "expected argument for flag `" + defaultLongOptDelimiter + "string-slice', but got option `--other-option'", }, { // long option must not be accepted as argument args: []string{"--string-slice", "--"}, expectError: true, errType: ErrExpectedArgument, errMsg: "expected argument for flag `" + defaultLongOptDelimiter + "string-slice', but got double dash `--'", }, { // quoted and appended option should 
be accepted as argument (even if it looks like an option) args: []string{"--string-slice", "foobar", "--string-slice=\"--other-option\""}, }, { // Accept any single character arguments including '-' args: []string{"--string-slice", "-"}, }, { // Do not accept arguments which start with '-' even if the next character is a digit args: []string{"--string-slice", "-3.14"}, expectError: true, errType: ErrExpectedArgument, errMsg: "expected argument for flag `" + defaultLongOptDelimiter + "string-slice', but got option `-3.14'", }, { // Do not accept arguments which start with '-' if the next character is not a digit args: []string{"--string-slice", "-character"}, expectError: true, errType: ErrExpectedArgument, errMsg: "expected argument for flag `" + defaultLongOptDelimiter + "string-slice', but got option `-character'", }, { args: []string{"-o", "-", "-"}, rest: []string{"-", "-"}, }, { // Accept arguments which start with '-' if the next character is a digit args: []string{"--int-slice", "-3"}, }, { // Accept arguments which start with '-' if the next character is a digit args: []string{"--int16", "-3"}, }, { // Accept arguments which start with '-' if the next character is a digit args: []string{"--float32", "-3.2"}, }, { // Accept arguments which start with '-' if the next character is a digit args: []string{"--float32ptr", "-3.2"}, }, { // Accept arguments for values that pass the IsValidValue fuction for value validators args: []string{"--custom-flag", "-foo"}, }, { // Accept arguments for values that pass the IsValidValue fuction for value validators args: []string{"--custom-flag", "-1"}, }, { // Rejects arguments for values that fail the IsValidValue fuction for value validators args: []string{"--custom-flag", "-2"}, expectError: true, errType: ErrExpectedArgument, errMsg: "invalid flag value", }, } var opts struct { StringSlice []string `long:"string-slice"` IntSlice []int `long:"int-slice"` Int16 int16 `long:"int16"` Float32 float32 `long:"float32"` Float32Ptr *float32 `long:"float32ptr"` OtherOption bool `long:"other-option" short:"o"` Custom CustomFlag `long:"custom-flag" short:"c"` } for _, test := range tests { if test.expectError { assertParseFail(t, test.errType, test.errMsg, &opts, test.args...) } else { args := assertParseSuccess(t, &opts, test.args...) 
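// On success, the leftover (non-option) arguments returned by the parser must match the expected rest for this table entry.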
assertStringArray(t, args, test.rest) } } } func TestUnknownFlagHandler(t *testing.T) { var opts struct { Flag1 string `long:"flag1"` Flag2 string `long:"flag2"` } p := NewParser(&opts, None) var unknownFlag1 string var unknownFlag2 bool var unknownFlag3 string // Set up a callback to intercept unknown options during parsing p.UnknownOptionHandler = func(option string, arg SplitArgument, args []string) ([]string, error) { if option == "unknownFlag1" { if argValue, ok := arg.Value(); ok { unknownFlag1 = argValue return args, nil } // consume a value from remaining args list unknownFlag1 = args[0] return args[1:], nil } else if option == "unknownFlag2" { // treat this one as a bool switch, don't consume any args unknownFlag2 = true return args, nil } else if option == "unknownFlag3" { if argValue, ok := arg.Value(); ok { unknownFlag3 = argValue return args, nil } // consume a value from remaining args list unknownFlag3 = args[0] return args[1:], nil } return args, fmt.Errorf("Unknown flag: %v", option) } // Parse args containing some unknown flags, verify that // our callback can handle all of them _, err := p.ParseArgs([]string{"--flag1=stuff", "--unknownFlag1", "blah", "--unknownFlag2", "--unknownFlag3=baz", "--flag2=foo"}) if err != nil { assertErrorf(t, "Parser returned unexpected error %v", err) } assertString(t, opts.Flag1, "stuff") assertString(t, opts.Flag2, "foo") assertString(t, unknownFlag1, "blah") assertString(t, unknownFlag3, "baz") if !unknownFlag2 { assertErrorf(t, "Flag should have been set by unknown handler, but had value: %v", unknownFlag2) } // Parse args with unknown flags that callback doesn't handle, verify it returns error _, err = p.ParseArgs([]string{"--flag1=stuff", "--unknownFlagX", "blah", "--flag2=foo"}) if err == nil { assertErrorf(t, "Parser should have returned error, but returned nil") } } func TestChoices(t *testing.T) { var opts struct { Choice string `long:"choose" choice:"v1" choice:"v2"` } assertParseFail(t, ErrInvalidChoice, "Invalid value `invalid' for option `"+defaultLongOptDelimiter+"choose'. 
Allowed values are: v1 or v2", &opts, "--choose", "invalid") assertParseSuccess(t, &opts, "--choose", "v2") assertString(t, opts.Choice, "v2") } func TestEmbedded(t *testing.T) { type embedded struct { V bool `short:"v"` } var opts struct { embedded } assertParseSuccess(t, &opts, "-v") if !opts.V { t.Errorf("Expected V to be true") } } type command struct { } func (c *command) Execute(args []string) error { return nil } func TestCommandHandlerNoCommand(t *testing.T) { var opts = struct { Value bool `short:"v"` }{} parser := NewParser(&opts, Default&^PrintErrors) var executedCommand Commander var executedArgs []string executed := false parser.CommandHandler = func(command Commander, args []string) error { executed = true executedCommand = command executedArgs = args return nil } _, err := parser.ParseArgs([]string{"arg1", "arg2"}) if err != nil { t.Fatalf("Unexpected parse error: %s", err) } if !executed { t.Errorf("Expected command handler to be executed") } if executedCommand != nil { t.Errorf("Did not exect an executed command") } assertStringArray(t, executedArgs, []string{"arg1", "arg2"}) } func TestCommandHandler(t *testing.T) { var opts = struct { Value bool `short:"v"` Command command `command:"cmd"` }{} parser := NewParser(&opts, Default&^PrintErrors) var executedCommand Commander var executedArgs []string executed := false parser.CommandHandler = func(command Commander, args []string) error { executed = true executedCommand = command executedArgs = args return nil } _, err := parser.ParseArgs([]string{"cmd", "arg1", "arg2"}) if err != nil { t.Fatalf("Unexpected parse error: %s", err) } if !executed { t.Errorf("Expected command handler to be executed") } if executedCommand == nil { t.Errorf("Expected command handler to be executed") } assertStringArray(t, executedArgs, []string{"arg1", "arg2"}) } func TestAllowBoolValues(t *testing.T) { var tests = []struct { msg string args []string expectedErr string expected bool expectedNonOptArgs []string }{ { msg: "no value", args: []string{"-v"}, expected: true, }, { msg: "true value", args: []string{"-v=true"}, expected: true, }, { msg: "false value", args: []string{"-v=false"}, expected: false, }, { msg: "bad value", args: []string{"-v=badvalue"}, expectedErr: `parsing "badvalue": invalid syntax`, }, { // this test is to ensure flag values can only be specified as --flag=value and not "--flag value". // if "--flag value" was supported it's not clear if value should be a non-optional argument // or the value for the flag. 
msg: "validate flags can only be set with a value immediately following an assignment operator (=)", args: []string{"-v", "false"}, expected: true, expectedNonOptArgs: []string{"false"}, }, } for _, test := range tests { var opts = struct { Value bool `short:"v"` }{} parser := NewParser(&opts, AllowBoolValues) nonOptArgs, err := parser.ParseArgs(test.args) if test.expectedErr == "" { if err != nil { t.Fatalf("%s:\nUnexpected parse error: %s", test.msg, err) } if opts.Value != test.expected { t.Errorf("%s:\nExpected %v; got %v", test.msg, test.expected, opts.Value) } if len(test.expectedNonOptArgs) != len(nonOptArgs) && !reflect.DeepEqual(test.expectedNonOptArgs, nonOptArgs) { t.Errorf("%s:\nUnexpected non-argument options\nexpected\n%+v\nbut got\n%+v\n", test.msg, test.expectedNonOptArgs, nonOptArgs) } } else { if err == nil { t.Errorf("%s:\nExpected error containing substring %q", test.msg, test.expectedErr) } else if !strings.Contains(err.Error(), test.expectedErr) { t.Errorf("%s:\nExpected error %q to contain substring %q", test.msg, err, test.expectedErr) } } } } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/option.go0000644000000000000000000003044315024302467023636 0ustar rootrootpackage flags import ( "bytes" "fmt" "os" "reflect" "strings" "unicode/utf8" ) // Option flag information. Contains a description of the option, short and // long name as well as a default value and whether an argument for this // flag is optional. type Option struct { // The description of the option flag. This description is shown // automatically in the built-in help. Description string // The short name of the option (a single character). If not 0, the // option flag can be 'activated' using -. Either ShortName // or LongName needs to be non-empty. ShortName rune // The long name of the option. If not "", the option flag can be // activated using --. Either ShortName or LongName needs // to be non-empty. LongName string // The default value of the option. Default []string // The optional environment default value key name. EnvDefaultKey string // The optional delimiter string for EnvDefaultKey values. EnvDefaultDelim string // If true, specifies that the argument to an option flag is optional. // When no argument to the flag is specified on the command line, the // value of OptionalValue will be set in the field this option represents. // This is only valid for non-boolean options. OptionalArgument bool // The optional value of the option. The optional value is used when // the option flag is marked as having an OptionalArgument. This means // that when the flag is specified, but no option argument is given, // the value of the field this option represents will be set to // OptionalValue. This is only valid for non-boolean options. OptionalValue []string // If true, the option _must_ be specified on the command line. If the // option is not specified, the parser will generate an ErrRequired type // error. Required bool // A name for the value of an option shown in the Help as --flag [ValueName] ValueName string // A mask value to show in the help instead of the default value. This // is useful for hiding sensitive information in the help, such as // passwords. DefaultMask string // If non empty, only a certain set of values is allowed for an option. Choices []string // If true, the option is not displayed in the help or man page Hidden bool // The group which the option belongs to group *Group // The struct field which the option represents. 
field reflect.StructField // The struct field value which the option represents. value reflect.Value // Determines if the option will be always quoted in the INI output iniQuote bool tag multiTag isSet bool isSetDefault bool preventDefault bool clearReferenceBeforeSet bool defaultLiteral string } // LongNameWithNamespace returns the option's long name with the group namespaces // prepended by walking up the option's group tree. Namespaces and the long name // itself are separated by the parser's namespace delimiter. If the long name is // empty an empty string is returned. func (option *Option) LongNameWithNamespace() string { if len(option.LongName) == 0 { return "" } // fetch the namespace delimiter from the parser which is always at the // end of the group hierarchy namespaceDelimiter := "" g := option.group for { if p, ok := g.parent.(*Parser); ok { namespaceDelimiter = p.NamespaceDelimiter break } switch i := g.parent.(type) { case *Command: g = i.Group case *Group: g = i } } // concatenate long name with namespace longName := option.LongName g = option.group for g != nil { if g.Namespace != "" { longName = g.Namespace + namespaceDelimiter + longName } switch i := g.parent.(type) { case *Command: g = i.Group case *Group: g = i case *Parser: g = nil } } return longName } // EnvKeyWithNamespace returns the option's env key with the group namespaces // prepended by walking up the option's group tree. Namespaces and the env key // itself are separated by the parser's namespace delimiter. If the env key is // empty an empty string is returned. func (option *Option) EnvKeyWithNamespace() string { if len(option.EnvDefaultKey) == 0 { return "" } // fetch the namespace delimiter from the parser which is always at the // end of the group hierarchy namespaceDelimiter := "" g := option.group for { if p, ok := g.parent.(*Parser); ok { namespaceDelimiter = p.EnvNamespaceDelimiter break } switch i := g.parent.(type) { case *Command: g = i.Group case *Group: g = i } } // concatenate long name with namespace key := option.EnvDefaultKey g = option.group for g != nil { if g.EnvNamespace != "" { key = g.EnvNamespace + namespaceDelimiter + key } switch i := g.parent.(type) { case *Command: g = i.Group case *Group: g = i case *Parser: g = nil } } return key } // String converts an option to a human friendly readable string describing the // option. func (option *Option) String() string { var s string var short string if option.ShortName != 0 { data := make([]byte, utf8.RuneLen(option.ShortName)) utf8.EncodeRune(data, option.ShortName) short = string(data) if len(option.LongName) != 0 { s = fmt.Sprintf("%s%s, %s%s", string(defaultShortOptDelimiter), short, defaultLongOptDelimiter, option.LongNameWithNamespace()) } else { s = fmt.Sprintf("%s%s", string(defaultShortOptDelimiter), short) } } else if len(option.LongName) != 0 { s = fmt.Sprintf("%s%s", defaultLongOptDelimiter, option.LongNameWithNamespace()) } return s } // Value returns the option value as an interface{}. func (option *Option) Value() interface{} { return option.value.Interface() } // Field returns the reflect struct field of the option. func (option *Option) Field() reflect.StructField { return option.field } // IsSet returns true if option has been set func (option *Option) IsSet() bool { return option.isSet } // IsSetDefault returns true if option has been set via the default option tag func (option *Option) IsSetDefault() bool { return option.isSetDefault } // Set the value of an option to the specified value. 
An error will be returned // if the specified value could not be converted to the corresponding option // value type. func (option *Option) Set(value *string) error { kind := option.value.Type().Kind() if (kind == reflect.Map || kind == reflect.Slice) && option.clearReferenceBeforeSet { option.empty() } option.isSet = true option.preventDefault = true option.clearReferenceBeforeSet = false if len(option.Choices) != 0 { found := false for _, choice := range option.Choices { if choice == *value { found = true break } } if !found { allowed := strings.Join(option.Choices[0:len(option.Choices)-1], ", ") if len(option.Choices) > 1 { allowed += " or " + option.Choices[len(option.Choices)-1] } return newErrorf(ErrInvalidChoice, "Invalid value `%s' for option `%s'. Allowed values are: %s", *value, option, allowed) } } if option.isFunc() { return option.call(value) } else if value != nil { return convert(*value, option.value, option.tag) } return convert("", option.value, option.tag) } func (option *Option) setDefault(value *string) error { if option.preventDefault { return nil } if err := option.Set(value); err != nil { return err } option.isSetDefault = true option.preventDefault = false return nil } func (option *Option) showInHelp() bool { return !option.Hidden && (option.ShortName != 0 || len(option.LongName) != 0) } func (option *Option) canArgument() bool { if u := option.isUnmarshaler(); u != nil { return true } return !option.isBool() } func (option *Option) emptyValue() reflect.Value { tp := option.value.Type() if tp.Kind() == reflect.Map { return reflect.MakeMap(tp) } return reflect.Zero(tp) } func (option *Option) empty() { if !option.isFunc() { option.value.Set(option.emptyValue()) } } func (option *Option) clearDefault() error { if option.preventDefault { return nil } usedDefault := option.Default if envKey := option.EnvKeyWithNamespace(); envKey != "" { if value, ok := os.LookupEnv(envKey); ok { if option.EnvDefaultDelim != "" { usedDefault = strings.Split(value, option.EnvDefaultDelim) } else { usedDefault = []string{value} } } } option.isSetDefault = true if len(usedDefault) > 0 { option.empty() for _, d := range usedDefault { err := option.setDefault(&d) if err != nil { return err } } } else { tp := option.value.Type() switch tp.Kind() { case reflect.Map: if option.value.IsNil() { option.empty() } case reflect.Slice: if option.value.IsNil() { option.empty() } } } return nil } func (option *Option) valueIsDefault() bool { // Check if the value of the option corresponds to its // default value emptyval := option.emptyValue() checkvalptr := reflect.New(emptyval.Type()) checkval := reflect.Indirect(checkvalptr) checkval.Set(emptyval) if len(option.Default) != 0 { for _, v := range option.Default { convert(v, checkval, option.tag) } } return reflect.DeepEqual(option.value.Interface(), checkval.Interface()) } func (option *Option) isUnmarshaler() Unmarshaler { v := option.value for { if !v.CanInterface() { break } i := v.Interface() if u, ok := i.(Unmarshaler); ok { return u } if !v.CanAddr() { break } v = v.Addr() } return nil } func (option *Option) isValueValidator() ValueValidator { v := option.value for { if !v.CanInterface() { break } i := v.Interface() if u, ok := i.(ValueValidator); ok { return u } if !v.CanAddr() { break } v = v.Addr() } return nil } func (option *Option) isBool() bool { tp := option.value.Type() for { switch tp.Kind() { case reflect.Slice, reflect.Ptr: tp = tp.Elem() case reflect.Bool: return true case reflect.Func: return tp.NumIn() == 0 default: return false 
} } } func (option *Option) isSignedNumber() bool { tp := option.value.Type() for { switch tp.Kind() { case reflect.Slice, reflect.Ptr: tp = tp.Elem() case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Float32, reflect.Float64: return true default: return false } } } func (option *Option) isFunc() bool { return option.value.Type().Kind() == reflect.Func } func (option *Option) call(value *string) error { var retval []reflect.Value if value == nil { retval = option.value.Call(nil) } else { tp := option.value.Type().In(0) val := reflect.New(tp) val = reflect.Indirect(val) if err := convert(*value, val, option.tag); err != nil { return err } retval = option.value.Call([]reflect.Value{val}) } if len(retval) == 1 && retval[0].Type() == reflect.TypeOf((*error)(nil)).Elem() { if retval[0].Interface() == nil { return nil } return retval[0].Interface().(error) } return nil } func (option *Option) updateDefaultLiteral() { defs := option.Default def := "" if len(defs) == 0 && option.canArgument() { var showdef bool switch option.field.Type.Kind() { case reflect.Func, reflect.Ptr: showdef = !option.value.IsNil() case reflect.Slice, reflect.String, reflect.Array: showdef = option.value.Len() > 0 case reflect.Map: showdef = !option.value.IsNil() && option.value.Len() > 0 default: zeroval := reflect.Zero(option.field.Type) showdef = !reflect.DeepEqual(zeroval.Interface(), option.value.Interface()) } if showdef { def, _ = convertToString(option.value, option.tag) } } else if len(defs) != 0 { l := len(defs) - 1 for i := 0; i < l; i++ { def += quoteIfNeeded(defs[i]) + ", " } def += quoteIfNeeded(defs[l]) } option.defaultLiteral = def } func (option *Option) shortAndLongName() string { ret := &bytes.Buffer{} if option.ShortName != 0 { ret.WriteRune(defaultShortOptDelimiter) ret.WriteRune(option.ShortName) } if len(option.LongName) != 0 { if option.ShortName != 0 { ret.WriteRune('/') } ret.WriteString(option.LongName) } return ret.String() } func (option *Option) isValidValue(arg string) error { if validator := option.isValueValidator(); validator != nil { return validator.IsValidValue(arg) } if argumentIsOption(arg) && !(option.isSignedNumber() && len(arg) > 1 && arg[0] == '-' && arg[1] >= '0' && arg[1] <= '9') { return fmt.Errorf("expected argument for flag `%s', but got option `%s'", option, arg) } return nil } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/command_test.go0000644000000000000000000003617415024302467025012 0ustar rootrootpackage flags import ( "fmt" "testing" ) func TestCommandInline(t *testing.T) { var opts = struct { Value bool `short:"v"` Command struct { G bool `short:"g"` } `command:"cmd"` }{} p, ret := assertParserSuccess(t, &opts, "-v", "cmd", "-g") assertStringArray(t, ret, []string{}) if p.Active == nil { t.Errorf("Expected active command") } if !opts.Value { t.Errorf("Expected Value to be true") } if !opts.Command.G { t.Errorf("Expected Command.G to be true") } if p.Command.Find("cmd") != p.Active { t.Errorf("Expected to find command `cmd' to be active") } } func TestCommandInlineMulti(t *testing.T) { var opts = struct { Value bool `short:"v"` C1 struct { } `command:"c1"` C2 struct { G bool `short:"g"` } `command:"c2"` }{} p, ret := assertParserSuccess(t, &opts, "-v", "c2", "-g") assertStringArray(t, ret, []string{}) if p.Active == nil { t.Errorf("Expected active command") } if !opts.Value { t.Errorf("Expected Value to be true") } if !opts.C2.G { t.Errorf("Expected C2.G to be true") } if p.Command.Find("c1") == nil { t.Errorf("Expected to 
find command `c1'") } if c2 := p.Command.Find("c2"); c2 == nil { t.Errorf("Expected to find command `c2'") } else if c2 != p.Active { t.Errorf("Expected to find command `c2' to be active") } } func TestCommandFlagOrder1(t *testing.T) { var opts = struct { Value bool `short:"v"` Command struct { G bool `short:"g"` } `command:"cmd"` }{} assertParseFail(t, ErrUnknownFlag, "unknown flag `g'", &opts, "-v", "-g", "cmd") } func TestCommandFlagOrder2(t *testing.T) { var opts = struct { Value bool `short:"v"` Command struct { G bool `short:"g"` } `command:"cmd"` }{} assertParseSuccess(t, &opts, "cmd", "-v", "-g") if !opts.Value { t.Errorf("Expected Value to be true") } if !opts.Command.G { t.Errorf("Expected Command.G to be true") } } func TestCommandFlagOrderSub(t *testing.T) { var opts = struct { Value bool `short:"v"` Command struct { G bool `short:"g"` SubCommand struct { B bool `short:"b"` } `command:"sub"` } `command:"cmd"` }{} assertParseSuccess(t, &opts, "cmd", "sub", "-v", "-g", "-b") if !opts.Value { t.Errorf("Expected Value to be true") } if !opts.Command.G { t.Errorf("Expected Command.G to be true") } if !opts.Command.SubCommand.B { t.Errorf("Expected Command.SubCommand.B to be true") } } func TestCommandFlagOverride1(t *testing.T) { var opts = struct { Value bool `short:"v"` Command struct { Value bool `short:"v"` } `command:"cmd"` }{} assertParseSuccess(t, &opts, "-v", "cmd") if !opts.Value { t.Errorf("Expected Value to be true") } if opts.Command.Value { t.Errorf("Expected Command.Value to be false") } } func TestCommandFlagOverride2(t *testing.T) { var opts = struct { Value bool `short:"v"` Command struct { Value bool `short:"v"` } `command:"cmd"` }{} assertParseSuccess(t, &opts, "cmd", "-v") if opts.Value { t.Errorf("Expected Value to be false") } if !opts.Command.Value { t.Errorf("Expected Command.Value to be true") } } func TestCommandFlagOverrideSub(t *testing.T) { var opts = struct { Value bool `short:"v"` Command struct { Value bool `short:"v"` SubCommand struct { Value bool `short:"v"` } `command:"sub"` } `command:"cmd"` }{} assertParseSuccess(t, &opts, "cmd", "sub", "-v") if opts.Value { t.Errorf("Expected Value to be false") } if opts.Command.Value { t.Errorf("Expected Command.Value to be false") } if !opts.Command.SubCommand.Value { t.Errorf("Expected Command.Value to be true") } } func TestCommandFlagOverrideSub2(t *testing.T) { var opts = struct { Value bool `short:"v"` Command struct { Value bool `short:"v"` SubCommand struct { G bool `short:"g"` } `command:"sub"` } `command:"cmd"` }{} assertParseSuccess(t, &opts, "cmd", "sub", "-v") if opts.Value { t.Errorf("Expected Value to be false") } if !opts.Command.Value { t.Errorf("Expected Command.Value to be true") } } func TestCommandEstimate(t *testing.T) { var opts = struct { Value bool `short:"v"` Cmd1 struct { } `command:"remove"` Cmd2 struct { } `command:"add"` }{} p := NewParser(&opts, None) _, err := p.ParseArgs([]string{}) assertError(t, err, ErrCommandRequired, "Please specify one command of: add or remove") } func TestCommandEstimate2(t *testing.T) { var opts = struct { Value bool `short:"v"` Cmd1 struct { } `command:"remove"` Cmd2 struct { } `command:"add"` }{} p := NewParser(&opts, None) _, err := p.ParseArgs([]string{"rmive"}) assertError(t, err, ErrUnknownCommand, "Unknown command `rmive', did you mean `remove'?") } type testCommand struct { G bool `short:"g"` Executed bool EArgs []string } func (c *testCommand) Execute(args []string) error { c.Executed = true c.EArgs = args return nil } func 
TestCommandExecute(t *testing.T) { var opts = struct { Value bool `short:"v"` Command testCommand `command:"cmd"` }{} assertParseSuccess(t, &opts, "-v", "cmd", "-g", "a", "b") if !opts.Value { t.Errorf("Expected Value to be true") } if !opts.Command.Executed { t.Errorf("Did not execute command") } if !opts.Command.G { t.Errorf("Expected Command.C to be true") } assertStringArray(t, opts.Command.EArgs, []string{"a", "b"}) } func TestCommandClosest(t *testing.T) { var opts = struct { Value bool `short:"v"` Cmd1 struct { } `command:"remove"` Cmd2 struct { } `command:"add"` }{} args := assertParseFail(t, ErrUnknownCommand, "Unknown command `addd', did you mean `add'?", &opts, "-v", "addd") assertStringArray(t, args, []string{"addd"}) } func TestCommandAdd(t *testing.T) { var opts = struct { Value bool `short:"v"` }{} var cmd = struct { G bool `short:"g"` }{} p := NewParser(&opts, Default) c, err := p.AddCommand("cmd", "", "", &cmd) if err != nil { t.Fatalf("Unexpected error: %v", err) return } ret, err := p.ParseArgs([]string{"-v", "cmd", "-g", "rest"}) if err != nil { t.Fatalf("Unexpected error: %v", err) return } assertStringArray(t, ret, []string{"rest"}) if !opts.Value { t.Errorf("Expected Value to be true") } if !cmd.G { t.Errorf("Expected Command.G to be true") } if p.Command.Find("cmd") != c { t.Errorf("Expected to find command `cmd'") } if p.Commands()[0] != c { t.Errorf("Expected command %#v, but got %#v", c, p.Commands()[0]) } if c.Options()[0].ShortName != 'g' { t.Errorf("Expected short name `g' but got %v", c.Options()[0].ShortName) } } func TestCommandNestedInline(t *testing.T) { var opts = struct { Value bool `short:"v"` Command struct { G bool `short:"g"` Nested struct { N string `long:"n"` } `command:"nested"` } `command:"cmd"` }{} p, ret := assertParserSuccess(t, &opts, "-v", "cmd", "-g", "nested", "--n", "n", "rest") assertStringArray(t, ret, []string{"rest"}) if !opts.Value { t.Errorf("Expected Value to be true") } if !opts.Command.G { t.Errorf("Expected Command.G to be true") } assertString(t, opts.Command.Nested.N, "n") if c := p.Command.Find("cmd"); c == nil { t.Errorf("Expected to find command `cmd'") } else { if c != p.Active { t.Errorf("Expected `cmd' to be the active parser command") } if nested := c.Find("nested"); nested == nil { t.Errorf("Expected to find command `nested'") } else if nested != c.Active { t.Errorf("Expected to find command `nested' to be the active `cmd' command") } } } func TestRequiredOnCommand(t *testing.T) { var opts = struct { Value bool `short:"v" required:"true"` Command struct { G bool `short:"g"` } `command:"cmd"` }{} assertParseFail(t, ErrRequired, fmt.Sprintf("the required flag `%cv' was not specified", defaultShortOptDelimiter), &opts, "cmd") } func TestRequiredAllOnCommand(t *testing.T) { var opts = struct { Value bool `short:"v" required:"true"` Missing bool `long:"missing" required:"true"` Command struct { G bool `short:"g"` } `command:"cmd"` }{} assertParseFail(t, ErrRequired, fmt.Sprintf("the required flags `%smissing' and `%cv' were not specified", defaultLongOptDelimiter, defaultShortOptDelimiter), &opts, "cmd") } func TestDefaultOnCommand(t *testing.T) { var opts = struct { Command struct { G string `short:"g" default:"value"` } `command:"cmd"` }{} assertParseSuccess(t, &opts, "cmd") if opts.Command.G != "value" { t.Errorf("Expected G to be \"value\"") } } func TestAfterNonCommand(t *testing.T) { var opts = struct { Value bool `short:"v"` Cmd1 struct { } `command:"remove"` Cmd2 struct { } `command:"add"` }{} assertParseFail(t, 
ErrUnknownCommand, "Unknown command `nocmd'. Please specify one command of: add or remove", &opts, "nocmd", "remove") } func TestSubcommandsOptional(t *testing.T) { var opts = struct { Value bool `short:"v"` Cmd1 struct { } `command:"remove"` Cmd2 struct { } `command:"add"` }{} p := NewParser(&opts, None) p.SubcommandsOptional = true _, err := p.ParseArgs([]string{"-v"}) if err != nil { t.Fatalf("Unexpected error: %v", err) return } if !opts.Value { t.Errorf("Expected Value to be true") } } func TestSubcommandsOptionalAfterNonCommand(t *testing.T) { var opts = struct { Value bool `short:"v"` Cmd1 struct { } `command:"remove"` Cmd2 struct { } `command:"add"` }{} p := NewParser(&opts, None) p.SubcommandsOptional = true retargs, err := p.ParseArgs([]string{"nocmd", "remove"}) if err != nil { t.Fatalf("Unexpected error: %v", err) return } assertStringArray(t, retargs, []string{"nocmd", "remove"}) } func TestCommandAlias(t *testing.T) { var opts = struct { Command struct { G string `short:"g" default:"value"` } `command:"cmd" alias:"cm"` }{} assertParseSuccess(t, &opts, "cm") if opts.Command.G != "value" { t.Errorf("Expected G to be \"value\"") } } func TestSubCommandFindOptionByLongFlag(t *testing.T) { var opts struct { Testing bool `long:"testing" description:"Testing"` } var cmd struct { Other bool `long:"other" description:"Other"` } p := NewParser(&opts, Default) c, _ := p.AddCommand("command", "Short", "Long", &cmd) opt := c.FindOptionByLongName("other") if opt == nil { t.Errorf("Expected option, but found none") } assertString(t, opt.LongName, "other") opt = c.FindOptionByLongName("testing") if opt == nil { t.Errorf("Expected option, but found none") } assertString(t, opt.LongName, "testing") } func TestSubCommandFindOptionByShortFlag(t *testing.T) { var opts struct { Testing bool `short:"t" description:"Testing"` } var cmd struct { Other bool `short:"o" description:"Other"` } p := NewParser(&opts, Default) c, _ := p.AddCommand("command", "Short", "Long", &cmd) opt := c.FindOptionByShortName('o') if opt == nil { t.Errorf("Expected option, but found none") } if opt.ShortName != 'o' { t.Errorf("Expected 'o', but got %v", opt.ShortName) } opt = c.FindOptionByShortName('t') if opt == nil { t.Errorf("Expected option, but found none") } if opt.ShortName != 't' { t.Errorf("Expected 'o', but got %v", opt.ShortName) } } type fooCmd struct { Flag bool `short:"f"` args []string } func (foo *fooCmd) Execute(s []string) error { foo.args = s return nil } func TestCommandPassAfterNonOption(t *testing.T) { var opts = struct { Value bool `short:"v"` Foo fooCmd `command:"foo"` }{} p := NewParser(&opts, PassAfterNonOption) ret, err := p.ParseArgs([]string{"-v", "foo", "-f", "bar", "-v", "-g"}) if err != nil { t.Fatalf("Unexpected error: %v", err) return } if !opts.Value { t.Errorf("Expected Value to be true") } if !opts.Foo.Flag { t.Errorf("Expected Foo.Flag to be true") } assertStringArray(t, ret, []string{"bar", "-v", "-g"}) assertStringArray(t, opts.Foo.args, []string{"bar", "-v", "-g"}) } type barCmd struct { fooCmd Positional struct { Args []string } `positional-args:"yes"` } func TestCommandPassAfterNonOptionWithPositional(t *testing.T) { var opts = struct { Value bool `short:"v"` Bar barCmd `command:"bar"` }{} p := NewParser(&opts, PassAfterNonOption) ret, err := p.ParseArgs([]string{"-v", "bar", "-f", "baz", "-v", "-g"}) if err != nil { t.Fatalf("Unexpected error: %v", err) return } if !opts.Value { t.Errorf("Expected Value to be true") } if !opts.Bar.Flag { t.Errorf("Expected Bar.Flag to be true") 
} assertStringArray(t, ret, []string{}) assertStringArray(t, opts.Bar.args, []string{}) assertStringArray(t, opts.Bar.Positional.Args, []string{"baz", "-v", "-g"}) } type cmdLocalPassAfterNonOptionMix struct { FlagA bool `short:"a"` Cmd1 struct { FlagB bool `short:"b"` Positional struct { Args []string } `positional-args:"yes"` } `command:"cmd1" pass-after-non-option:"yes"` Cmd2 struct { FlagB bool `short:"b"` Positional struct { Args []string } `positional-args:"yes"` } `command:"cmd2"` } func TestCommandLocalPassAfterNonOptionMixCmd1(t *testing.T) { var opts cmdLocalPassAfterNonOptionMix assertParseSuccess(t, &opts, "cmd1", "-b", "arg1", "-a", "arg2", "-x") if opts.FlagA { t.Errorf("Expected FlagA to be false") } if !opts.Cmd1.FlagB { t.Errorf("Expected Cmd1.FlagB to be true") } assertStringArray(t, opts.Cmd1.Positional.Args, []string{"arg1", "-a", "arg2", "-x"}) } func TestCommandLocalPassAfterNonOptionMixCmd2(t *testing.T) { var opts cmdLocalPassAfterNonOptionMix assertParseSuccess(t, &opts, "cmd2", "-b", "arg1", "-a", "arg2") if !opts.FlagA { t.Errorf("Expected FlagA to be true") } if !opts.Cmd2.FlagB { t.Errorf("Expected Cmd2.FlagB to be true") } assertStringArray(t, opts.Cmd2.Positional.Args, []string{"arg1", "arg2"}) } func TestCommandLocalPassAfterNonOptionMixCmd2UnkownFlag(t *testing.T) { var opts cmdLocalPassAfterNonOptionMix assertParseFail(t, ErrUnknownFlag, "unknown flag `x'", &opts, "cmd2", "-b", "arg1", "-a", "arg2", "-x") } type cmdLocalPassAfterNonOptionNest struct { FlagA bool `short:"a"` Cmd1 struct { FlagB bool `short:"b"` Cmd2 struct { FlagC bool `short:"c"` Cmd3 struct { FlagD bool `short:"d"` } `command:"cmd3"` } `command:"cmd2" subcommands-optional:"yes" pass-after-non-option:"yes"` } `command:"cmd1"` } func TestCommandLocalPassAfterNonOptionNest1(t *testing.T) { var opts cmdLocalPassAfterNonOptionNest ret := assertParseSuccess(t, &opts, "cmd1", "cmd2", "-a", "x", "-b", "cmd3", "-c", "-d") if !opts.FlagA { t.Errorf("Expected FlagA to be true") } if opts.Cmd1.FlagB { t.Errorf("Expected Cmd1.FlagB to be false") } if opts.Cmd1.Cmd2.FlagC { t.Errorf("Expected Cmd1.Cmd2.FlagC to be false") } if opts.Cmd1.Cmd2.Cmd3.FlagD { t.Errorf("Expected Cmd1.Cmd2.Cmd3.FlagD to be false") } assertStringArray(t, ret, []string{"x", "-b", "cmd3", "-c", "-d"}) } func TestCommandLocalPassAfterNonOptionNest2(t *testing.T) { var opts cmdLocalPassAfterNonOptionNest ret := assertParseSuccess(t, &opts, "cmd1", "cmd2", "cmd3", "-a", "x", "-b", "-c", "-d") if !opts.FlagA { t.Errorf("Expected FlagA to be true") } if !opts.Cmd1.FlagB { t.Errorf("Expected Cmd1.FlagB to be true") } if !opts.Cmd1.Cmd2.FlagC { t.Errorf("Expected Cmd1.Cmd2.FlagC to be true") } if !opts.Cmd1.Cmd2.Cmd3.FlagD { t.Errorf("Expected Cmd1.Cmd2.Cmd3.FlagD to be true") } assertStringArray(t, ret, []string{"x"}) } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/completion.go0000644000000000000000000001567715024302467024513 0ustar rootrootpackage flags import ( "fmt" "os" "path/filepath" "reflect" "sort" "strings" "unicode/utf8" ) // Completion is a type containing information of a completion. 
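//
// An illustrative sketch (not upstream code): a type implementing the
// Completer interface declared below might return every candidate value that
// starts with the partial input. The level names here are invented for the
// example.
//
//	type LogLevel string
//
//	func (l *LogLevel) Complete(match string) []Completion {
//		var ret []Completion
//		for _, lvl := range []string{"debug", "info", "warn", "error"} {
//			if strings.HasPrefix(lvl, match) {
//				ret = append(ret, Completion{Item: lvl, Description: "log level"})
//			}
//		}
//		return ret
//	}
//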
type Completion struct { // The completed item Item string // A description of the completed item (optional) Description string } type completions []Completion func (c completions) Len() int { return len(c) } func (c completions) Less(i, j int) bool { return c[i].Item < c[j].Item } func (c completions) Swap(i, j int) { c[i], c[j] = c[j], c[i] } // Completer is an interface which can be implemented by types // to provide custom command line argument completion. type Completer interface { // Complete receives a prefix representing a (partial) value // for its type and should provide a list of possible valid // completions. Complete(match string) []Completion } type completion struct { parser *Parser } // Filename is a string alias which provides filename completion. type Filename string func completionsWithoutDescriptions(items []string) []Completion { ret := make([]Completion, len(items)) for i, v := range items { ret[i].Item = v } return ret } // Complete returns a list of existing files with the given // prefix. func (f *Filename) Complete(match string) []Completion { ret, _ := filepath.Glob(match + "*") if len(ret) == 1 { if info, err := os.Stat(ret[0]); err == nil && info.IsDir() { ret[0] = ret[0] + "/" } } return completionsWithoutDescriptions(ret) } func (c *completion) skipPositional(s *parseState, n int) { if n >= len(s.positional) { s.positional = nil } else { s.positional = s.positional[n:] } } func (c *completion) completeOptionNames(s *parseState, prefix string, match string, short bool) []Completion { if short && len(match) != 0 { return []Completion{ { Item: prefix + match, }, } } var results []Completion repeats := map[string]bool{} for name, opt := range s.lookup.longNames { if strings.HasPrefix(name, match) && !opt.Hidden { results = append(results, Completion{ Item: defaultLongOptDelimiter + name, Description: opt.Description, }) if short { repeats[string(opt.ShortName)] = true } } } if short { for name, opt := range s.lookup.shortNames { if _, exist := repeats[name]; !exist && strings.HasPrefix(name, match) && !opt.Hidden { results = append(results, Completion{ Item: string(defaultShortOptDelimiter) + name, Description: opt.Description, }) } } } return results } func (c *completion) completeNamesForLongPrefix(s *parseState, prefix string, match string) []Completion { return c.completeOptionNames(s, prefix, match, false) } func (c *completion) completeNamesForShortPrefix(s *parseState, prefix string, match string) []Completion { return c.completeOptionNames(s, prefix, match, true) } func (c *completion) completeCommands(s *parseState, match string) []Completion { n := make([]Completion, 0, len(s.command.commands)) for _, cmd := range s.command.commands { if cmd.data != c && !cmd.Hidden && strings.HasPrefix(cmd.Name, match) { n = append(n, Completion{ Item: cmd.Name, Description: cmd.ShortDescription, }) } } return n } func (c *completion) completeValue(value reflect.Value, prefix string, match string) []Completion { if value.Kind() == reflect.Slice { value = reflect.New(value.Type().Elem()) } i := value.Interface() var ret []Completion if cmp, ok := i.(Completer); ok { ret = cmp.Complete(match) } else if value.CanAddr() { if cmp, ok = value.Addr().Interface().(Completer); ok { ret = cmp.Complete(match) } } for i, v := range ret { ret[i].Item = prefix + v.Item } return ret } func (c *completion) complete(args []string) []Completion { if len(args) == 0 { args = []string{""} } s := &parseState{ args: args, } c.parser.fillParseState(s) var opt *Option for len(s.args) > 1 { arg 
:= s.pop() if (c.parser.Options&PassDoubleDash) != None && arg == "--" { opt = nil c.skipPositional(s, len(s.args)-1) break } if argumentIsOption(arg) { prefix, optname, islong := stripOptionPrefix(arg) optname, _, argument := splitOption(prefix, optname, islong) if argument == nil { var o *Option canarg := true if islong { o = s.lookup.longNames[optname] } else { for i, r := range optname { sname := string(r) o = s.lookup.shortNames[sname] if o == nil { break } if i == 0 && o.canArgument() && len(optname) != len(sname) { canarg = false break } } } if o == nil && (c.parser.Options&PassAfterNonOption) != None { opt = nil c.skipPositional(s, len(s.args)-1) break } else if o != nil && o.canArgument() && !o.OptionalArgument && canarg { if len(s.args) > 1 { s.pop() } else { opt = o } } } } else { if len(s.positional) > 0 { if !s.positional[0].isRemaining() { // Don't advance beyond a remaining positional arg (because // it consumes all subsequent args). s.positional = s.positional[1:] } } else if cmd, ok := s.lookup.commands[arg]; ok { cmd.fillParseState(s) } opt = nil } } lastarg := s.args[len(s.args)-1] var ret []Completion if opt != nil { // Completion for the argument of 'opt' ret = c.completeValue(opt.value, "", lastarg) } else if argumentStartsOption(lastarg) { // Complete the option prefix, optname, islong := stripOptionPrefix(lastarg) optname, split, argument := splitOption(prefix, optname, islong) if argument == nil && !islong { rname, n := utf8.DecodeRuneInString(optname) sname := string(rname) if opt := s.lookup.shortNames[sname]; opt != nil && opt.canArgument() { ret = c.completeValue(opt.value, prefix+sname, optname[n:]) } else { ret = c.completeNamesForShortPrefix(s, prefix, optname) } } else if argument != nil { if islong { opt = s.lookup.longNames[optname] } else { opt = s.lookup.shortNames[optname] } if opt != nil { ret = c.completeValue(opt.value, prefix+optname+split, *argument) } } else if islong { ret = c.completeNamesForLongPrefix(s, prefix, optname) } else { ret = c.completeNamesForShortPrefix(s, prefix, optname) } } else if len(s.positional) > 0 { // Complete for positional argument ret = c.completeValue(s.positional[0].value, "", lastarg) } else if len(s.command.commands) > 0 { // Complete for command ret = c.completeCommands(s, lastarg) } sort.Sort(completions(ret)) return ret } func (c *completion) print(items []Completion, showDescriptions bool) { if showDescriptions && len(items) > 1 { maxl := 0 for _, v := range items { if len(v.Item) > maxl { maxl = len(v.Item) } } for _, v := range items { fmt.Printf("%s", v.Item) if len(v.Description) > 0 { fmt.Printf("%s # %s", strings.Repeat(" ", maxl-len(v.Item)), v.Description) } fmt.Printf("\n") } } else { for _, v := range items { fmt.Println(v.Item) } } } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/man.go0000644000000000000000000001231015024302467023072 0ustar rootrootpackage flags import ( "fmt" "io" "os" "runtime" "strconv" "strings" "time" ) func manQuoteLines(s string) string { lines := strings.Split(s, "\n") parts := []string{} for _, line := range lines { parts = append(parts, manQuote(line)) } return strings.Join(parts, "\n") } func manQuote(s string) string { return strings.Replace(s, "\\", "\\\\", -1) } func formatForMan(wr io.Writer, s string, quoter func(s string) string) { for { idx := strings.IndexRune(s, '`') if idx < 0 { fmt.Fprintf(wr, "%s", quoter(s)) break } fmt.Fprintf(wr, "%s", quoter(s[:idx])) s = s[idx+1:] idx = strings.IndexRune(s, '\'') if idx < 0 { fmt.Fprintf(wr, "%s", quoter(s)) 
break } fmt.Fprintf(wr, "\\fB%s\\fP", quoter(s[:idx])) s = s[idx+1:] } } func writeManPageOptions(wr io.Writer, grp *Group) { grp.eachGroup(func(group *Group) { if !group.showInHelp() { return } // If the parent (grp) has any subgroups, display their descriptions as // subsection headers similar to the output of --help. if group.ShortDescription != "" && len(grp.groups) > 0 { fmt.Fprintf(wr, ".SS %s\n", group.ShortDescription) if group.LongDescription != "" { formatForMan(wr, group.LongDescription, manQuoteLines) fmt.Fprintln(wr, "") } } for _, opt := range group.options { if !opt.showInHelp() { continue } fmt.Fprintln(wr, ".TP") fmt.Fprintf(wr, "\\fB") if opt.ShortName != 0 { fmt.Fprintf(wr, "\\fB\\-%c\\fR", opt.ShortName) } if len(opt.LongName) != 0 { if opt.ShortName != 0 { fmt.Fprintf(wr, ", ") } fmt.Fprintf(wr, "\\fB\\-\\-%s\\fR", manQuote(opt.LongNameWithNamespace())) } if len(opt.ValueName) != 0 || opt.OptionalArgument { if opt.OptionalArgument { fmt.Fprintf(wr, " [\\fI%s=%s\\fR]", manQuote(opt.ValueName), manQuote(strings.Join(quoteV(opt.OptionalValue), ", "))) } else { fmt.Fprintf(wr, " \\fI%s\\fR", manQuote(opt.ValueName)) } } if len(opt.Default) != 0 { fmt.Fprintf(wr, " ", manQuote(strings.Join(quoteV(opt.Default), ", "))) } else if len(opt.EnvKeyWithNamespace()) != 0 { if runtime.GOOS == "windows" { fmt.Fprintf(wr, " ", manQuote(opt.EnvKeyWithNamespace())) } else { fmt.Fprintf(wr, " ", manQuote(opt.EnvKeyWithNamespace())) } } if opt.Required { fmt.Fprintf(wr, " (\\fIrequired\\fR)") } fmt.Fprintln(wr, "\\fP") if len(opt.Description) != 0 { formatForMan(wr, opt.Description, manQuoteLines) fmt.Fprintln(wr, "") } } }) } func writeManPageSubcommands(wr io.Writer, name string, usagePrefix string, root *Command) { commands := root.sortedVisibleCommands() for _, c := range commands { var nn string if c.Hidden { continue } if len(name) != 0 { nn = name + " " + c.Name } else { nn = c.Name } writeManPageCommand(wr, nn, usagePrefix, c) } } func writeManPageCommand(wr io.Writer, name string, usagePrefix string, command *Command) { fmt.Fprintf(wr, ".SS %s\n", name) fmt.Fprintln(wr, command.ShortDescription) if len(command.LongDescription) > 0 { fmt.Fprintln(wr, "") cmdstart := fmt.Sprintf("The %s command", manQuote(command.Name)) if strings.HasPrefix(command.LongDescription, cmdstart) { fmt.Fprintf(wr, "The \\fI%s\\fP command", manQuote(command.Name)) formatForMan(wr, command.LongDescription[len(cmdstart):], manQuoteLines) fmt.Fprintln(wr, "") } else { formatForMan(wr, command.LongDescription, manQuoteLines) fmt.Fprintln(wr, "") } } var pre = usagePrefix + " " + command.Name var usage string if us, ok := command.data.(Usage); ok { usage = us.Usage() } else if command.hasHelpOptions() { usage = fmt.Sprintf("[%s-OPTIONS]", command.Name) } var nextPrefix = pre if len(usage) > 0 { fmt.Fprintf(wr, "\n\\fBUsage\\fP: %s %s\n.TP\n", manQuote(pre), manQuote(usage)) nextPrefix = pre + " " + usage } if len(command.Aliases) > 0 { fmt.Fprintf(wr, "\n\\fBAliases\\fP: %s\n\n", manQuote(strings.Join(command.Aliases, ", "))) } writeManPageOptions(wr, command.Group) writeManPageSubcommands(wr, name, nextPrefix, command) } // WriteManPage writes a basic man page in groff format to the specified // writer. 
func (p *Parser) WriteManPage(wr io.Writer) { t := time.Now() source_date_epoch := os.Getenv("SOURCE_DATE_EPOCH") if source_date_epoch != "" { sde, err := strconv.ParseInt(source_date_epoch, 10, 64) if err != nil { panic(fmt.Sprintf("Invalid SOURCE_DATE_EPOCH: %s", err)) } t = time.Unix(sde, 0) } fmt.Fprintf(wr, ".TH %s 1 \"%s\"\n", manQuote(p.Name), t.Format("2 January 2006")) fmt.Fprintln(wr, ".SH NAME") fmt.Fprintf(wr, "%s \\- %s\n", manQuote(p.Name), manQuoteLines(p.ShortDescription)) fmt.Fprintln(wr, ".SH SYNOPSIS") usage := p.Usage if len(usage) == 0 { usage = "[OPTIONS]" } fmt.Fprintf(wr, "\\fB%s\\fP %s\n", manQuote(p.Name), manQuote(usage)) fmt.Fprintln(wr, ".SH DESCRIPTION") formatForMan(wr, p.LongDescription, manQuoteLines) fmt.Fprintln(wr, "") fmt.Fprintln(wr, ".SH OPTIONS") writeManPageOptions(wr, p.Command.Group) if len(p.visibleCommands()) > 0 { fmt.Fprintln(wr, ".SH COMMANDS") writeManPageSubcommands(wr, "", p.Name+" "+usage, p.Command) } } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/convert_test.go0000644000000000000000000000570715024302467025052 0ustar rootrootpackage flags import ( "testing" "time" ) func expectConvert(t *testing.T, o *Option, expected string) { s, err := convertToString(o.value, o.tag) if err != nil { t.Errorf("Unexpected error: %v", err) return } assertString(t, s, expected) } func TestConvertToString(t *testing.T) { d, _ := time.ParseDuration("1h2m4s") var opts = struct { String string `long:"string"` Int int `long:"int"` Int8 int8 `long:"int8"` Int16 int16 `long:"int16"` Int32 int32 `long:"int32"` Int64 int64 `long:"int64"` Uint uint `long:"uint"` Uint8 uint8 `long:"uint8"` Uint16 uint16 `long:"uint16"` Uint32 uint32 `long:"uint32"` Uint64 uint64 `long:"uint64"` Float32 float32 `long:"float32"` Float64 float64 `long:"float64"` Duration time.Duration `long:"duration"` Bool bool `long:"bool"` IntSlice []int `long:"int-slice"` IntFloatMap map[int]float64 `long:"int-float-map"` PtrBool *bool `long:"ptr-bool"` Interface interface{} `long:"interface"` Int32Base int32 `long:"int32-base" base:"16"` Uint32Base uint32 `long:"uint32-base" base:"16"` }{ "string", -2, -1, 0, 1, 2, 1, 2, 3, 4, 5, 1.2, -3.4, d, true, []int{-3, 4, -2}, map[int]float64{-2: 4.5}, new(bool), float32(5.2), -5823, 4232, } p := NewNamedParser("test", Default) grp, _ := p.AddGroup("test group", "", &opts) expects := []string{ "string", "-2", "-1", "0", "1", "2", "1", "2", "3", "4", "5", "1.2", "-3.4", "1h2m4s", "true", "[-3, 4, -2]", "{-2:4.5}", "false", "5.2", "-16bf", "1088", } for i, v := range grp.Options() { expectConvert(t, v, expects[i]) } } func TestConvertToStringInvalidIntBase(t *testing.T) { var opts = struct { Int int `long:"int" base:"no"` }{ 2, } p := NewNamedParser("test", Default) grp, _ := p.AddGroup("test group", "", &opts) o := grp.Options()[0] _, err := convertToString(o.value, o.tag) if err != nil { err = newErrorf(ErrMarshal, "%v", err) } assertError(t, err, ErrMarshal, "strconv.ParseInt: parsing \"no\": invalid syntax") } func TestConvertToStringInvalidUintBase(t *testing.T) { var opts = struct { Uint uint `long:"uint" base:"no"` }{ 2, } p := NewNamedParser("test", Default) grp, _ := p.AddGroup("test group", "", &opts) o := grp.Options()[0] _, err := convertToString(o.value, o.tag) if err != nil { err = newErrorf(ErrMarshal, "%v", err) } assertError(t, err, ErrMarshal, "strconv.ParseInt: parsing \"no\": invalid syntax") } func TestConvertToMapWithDelimiter(t *testing.T) { var opts = struct { StringStringMap map[string]string `long:"string-string-map" 
key-value-delimiter:"="` }{} p := NewNamedParser("test", Default) grp, _ := p.AddGroup("test group", "", &opts) o := grp.Options()[0] err := convert("key=value", o.value, o.tag) if err != nil { t.Errorf("Unexpected error: %v", err) return } assertString(t, opts.StringStringMap["key"], "value") } dependencies/pkg/mod/github.com/jessevdk/go-flags@v1.6.1/multitag.go0000644000000000000000000000462715024302467024161 0ustar rootrootpackage flags import ( "strconv" ) type multiTag struct { value string cache map[string][]string } func newMultiTag(v string) multiTag { return multiTag{ value: v, } } func (x *multiTag) scan() (map[string][]string, error) { v := x.value ret := make(map[string][]string) // This is mostly copied from reflect.StructTag.Get for v != "" { i := 0 // Skip whitespace for i < len(v) && v[i] == ' ' { i++ } v = v[i:] if v == "" { break } // Scan to colon to find key i = 0 for i < len(v) && v[i] != ' ' && v[i] != ':' && v[i] != '"' { i++ } if i >= len(v) { return nil, newErrorf(ErrTag, "expected `:' after key name, but got end of tag (in `%v`)", x.value) } if v[i] != ':' { return nil, newErrorf(ErrTag, "expected `:' after key name, but got `%v' (in `%v`)", v[i], x.value) } if i+1 >= len(v) { return nil, newErrorf(ErrTag, "expected `\"' to start tag value at end of tag (in `%v`)", x.value) } if v[i+1] != '"' { return nil, newErrorf(ErrTag, "expected `\"' to start tag value, but got `%v' (in `%v`)", v[i+1], x.value) } name := v[:i] v = v[i+1:] // Scan quoted string to find value i = 1 for i < len(v) && v[i] != '"' { if v[i] == '\n' { return nil, newErrorf(ErrTag, "unexpected newline in tag value `%v' (in `%v`)", name, x.value) } if v[i] == '\\' { i++ } i++ } if i >= len(v) { return nil, newErrorf(ErrTag, "expected end of tag value `\"' at end of tag (in `%v`)", x.value) } val, err := strconv.Unquote(v[:i+1]) if err != nil { return nil, newErrorf(ErrTag, "Malformed value of tag `%v:%v` => %v (in `%v`)", name, v[:i+1], err, x.value) } v = v[i+1:] ret[name] = append(ret[name], val) } return ret, nil } func (x *multiTag) Parse() error { vals, err := x.scan() x.cache = vals return err } func (x *multiTag) cached() map[string][]string { if x.cache == nil { cache, _ := x.scan() if cache == nil { cache = make(map[string][]string) } x.cache = cache } return x.cache } func (x *multiTag) Get(key string) string { c := x.cached() if v, ok := c[key]; ok { return v[len(v)-1] } return "" } func (x *multiTag) GetMany(key string) []string { c := x.cached() return c[key] } func (x *multiTag) Set(key string, value string) { c := x.cached() c[key] = []string{value} } func (x *multiTag) SetMany(key string, value []string) { c := x.cached() c[key] = value } dependencies/pkg/mod/github.com/acarl005/0000775000000000000000000000000015024302472017111 5ustar rootrootdependencies/pkg/mod/github.com/acarl005/stripansi@v0.0.0-20180116102854-5a71ef0e047d/0000755000000000000000000000000015024302472025163 5ustar rootrootdependencies/pkg/mod/github.com/acarl005/stripansi@v0.0.0-20180116102854-5a71ef0e047d/LICENSE0000644000000000000000000000205715024302472026174 0ustar rootrootMIT License Copyright (c) 2018 Andrew Carlson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, 
subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. dependencies/pkg/mod/github.com/acarl005/stripansi@v0.0.0-20180116102854-5a71ef0e047d/README.md0000644000000000000000000000110015024302472026432 0ustar rootrootStrip ANSI ========== This Go package removes ANSI escape codes from strings. Ideally, we would prevent these from appearing in any text we want to process. However, sometimes this can't be helped, and we need to be able to deal with that noise. This will use a regexp to remove those unwanted escape codes. ## Install ```sh $ go get -u github.com/acarl005/stripansi ``` ## Usage ```go import ( "fmt" "github.com/acarl005/stripansi" ) func main() { msg := "\x1b[38;5;140m foo\x1b[0m bar" cleanMsg := stripansi.Strip(msg) fmt.Println(cleanMsg) // " foo bar" } ``` dependencies/pkg/mod/github.com/acarl005/stripansi@v0.0.0-20180116102854-5a71ef0e047d/stripansi.go0000644000000000000000000000044515024302472027531 0ustar rootrootpackage stripansi import ( "regexp" ) const ansi = "[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))" var re = regexp.MustCompile(ansi) func Strip(str string) string { return re.ReplaceAllString(str, "") } dependencies/pkg/mod/github.com/lib/0000775000000000000000000000000015024302467016354 5ustar rootrootdependencies/pkg/mod/github.com/lib/pq@v1.10.9/0000755000000000000000000000000015024302467017727 5ustar rootrootdependencies/pkg/mod/github.com/lib/pq@v1.10.9/rows.go0000644000000000000000000000463115024302467021254 0ustar rootrootpackage pq import ( "math" "reflect" "time" "github.com/lib/pq/oid" ) const headerSize = 4 type fieldDesc struct { // The object ID of the data type. OID oid.Oid // The data type size (see pg_type.typlen). // Note that negative values denote variable-width types. Len int // The type modifier (see pg_attribute.atttypmod). // The meaning of the modifier is type-specific. 
Mod int } func (fd fieldDesc) Type() reflect.Type { switch fd.OID { case oid.T_int8: return reflect.TypeOf(int64(0)) case oid.T_int4: return reflect.TypeOf(int32(0)) case oid.T_int2: return reflect.TypeOf(int16(0)) case oid.T_varchar, oid.T_text: return reflect.TypeOf("") case oid.T_bool: return reflect.TypeOf(false) case oid.T_date, oid.T_time, oid.T_timetz, oid.T_timestamp, oid.T_timestamptz: return reflect.TypeOf(time.Time{}) case oid.T_bytea: return reflect.TypeOf([]byte(nil)) default: return reflect.TypeOf(new(interface{})).Elem() } } func (fd fieldDesc) Name() string { return oid.TypeName[fd.OID] } func (fd fieldDesc) Length() (length int64, ok bool) { switch fd.OID { case oid.T_text, oid.T_bytea: return math.MaxInt64, true case oid.T_varchar, oid.T_bpchar: return int64(fd.Mod - headerSize), true default: return 0, false } } func (fd fieldDesc) PrecisionScale() (precision, scale int64, ok bool) { switch fd.OID { case oid.T_numeric, oid.T__numeric: mod := fd.Mod - headerSize precision = int64((mod >> 16) & 0xffff) scale = int64(mod & 0xffff) return precision, scale, true default: return 0, 0, false } } // ColumnTypeScanType returns the value type that can be used to scan types into. func (rs *rows) ColumnTypeScanType(index int) reflect.Type { return rs.colTyps[index].Type() } // ColumnTypeDatabaseTypeName return the database system type name. func (rs *rows) ColumnTypeDatabaseTypeName(index int) string { return rs.colTyps[index].Name() } // ColumnTypeLength returns the length of the column type if the column is a // variable length type. If the column is not a variable length type ok // should return false. func (rs *rows) ColumnTypeLength(index int) (length int64, ok bool) { return rs.colTyps[index].Length() } // ColumnTypePrecisionScale should return the precision and scale for decimal // types. If not applicable, ok should be false. func (rs *rows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) { return rs.colTyps[index].PrecisionScale() } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/conn_go115.go0000644000000000000000000000015715024302467022132 0ustar rootroot//go:build go1.15 // +build go1.15 package pq import "database/sql/driver" var _ driver.Validator = &conn{} dependencies/pkg/mod/github.com/lib/pq@v1.10.9/conn.go0000644000000000000000000014250715024302467021224 0ustar rootrootpackage pq import ( "bufio" "bytes" "context" "crypto/md5" "crypto/sha256" "database/sql" "database/sql/driver" "encoding/binary" "errors" "fmt" "io" "net" "os" "os/user" "path" "path/filepath" "strconv" "strings" "sync" "time" "unicode" "github.com/lib/pq/oid" "github.com/lib/pq/scram" ) // Common error types var ( ErrNotSupported = errors.New("pq: Unsupported command") ErrInFailedTransaction = errors.New("pq: Could not complete operation in a failed transaction") ErrSSLNotSupported = errors.New("pq: SSL is not enabled on the server") ErrSSLKeyUnknownOwnership = errors.New("pq: Could not get owner information for private key, may not be properly protected") ErrSSLKeyHasWorldPermissions = errors.New("pq: Private key has world access. Permissions should be u=rw,g=r (0640) if owned by root, or u=rw (0600), or less") ErrCouldNotDetectUsername = errors.New("pq: Could not detect default username. 
Please provide one explicitly") errUnexpectedReady = errors.New("unexpected ReadyForQuery") errNoRowsAffected = errors.New("no RowsAffected available after the empty statement") errNoLastInsertID = errors.New("no LastInsertId available after the empty statement") ) // Compile time validation that our types implement the expected interfaces var ( _ driver.Driver = Driver{} ) // Driver is the Postgres database driver. type Driver struct{} // Open opens a new connection to the database. name is a connection string. // Most users should only use it through database/sql package from the standard // library. func (d Driver) Open(name string) (driver.Conn, error) { return Open(name) } func init() { sql.Register("postgres", &Driver{}) } type parameterStatus struct { // server version in the same format as server_version_num, or 0 if // unavailable serverVersion int // the current location based on the TimeZone value of the session, if // available currentLocation *time.Location } type transactionStatus byte const ( txnStatusIdle transactionStatus = 'I' txnStatusIdleInTransaction transactionStatus = 'T' txnStatusInFailedTransaction transactionStatus = 'E' ) func (s transactionStatus) String() string { switch s { case txnStatusIdle: return "idle" case txnStatusIdleInTransaction: return "idle in transaction" case txnStatusInFailedTransaction: return "in a failed transaction" default: errorf("unknown transactionStatus %d", s) } panic("not reached") } // Dialer is the dialer interface. It can be used to obtain more control over // how pq creates network connections. type Dialer interface { Dial(network, address string) (net.Conn, error) DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) } // DialerContext is the context-aware dialer interface. type DialerContext interface { DialContext(ctx context.Context, network, address string) (net.Conn, error) } type defaultDialer struct { d net.Dialer } func (d defaultDialer) Dial(network, address string) (net.Conn, error) { return d.d.Dial(network, address) } func (d defaultDialer) DialTimeout( network, address string, timeout time.Duration, ) (net.Conn, error) { ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() return d.DialContext(ctx, network, address) } func (d defaultDialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { return d.d.DialContext(ctx, network, address) } type conn struct { c net.Conn buf *bufio.Reader namei int scratch [512]byte txnStatus transactionStatus txnFinish func() // Save connection arguments to use during CancelRequest. dialer Dialer opts values // Cancellation key data for use with CancelRequest messages. processID int secretKey int parameterStatus parameterStatus saveMessageType byte saveMessageBuffer []byte // If an error is set, this connection is bad and all public-facing // functions should return the appropriate error by calling get() // (ErrBadConn) or getForNext(). err syncErr // If set, this connection should never use the binary format when // receiving query results from prepared statements. Only provided for // debugging. disablePreparedBinaryResult bool // Whether to always send []byte parameters over as binary. Enables single // round-trip mode for non-prepared Query calls. 
binaryParameters bool // If true this connection is in the middle of a COPY inCopy bool // If not nil, notices will be synchronously sent here noticeHandler func(*Error) // If not nil, notifications will be synchronously sent here notificationHandler func(*Notification) // GSSAPI context gss GSS } type syncErr struct { err error sync.Mutex } // Return ErrBadConn if connection is bad. func (e *syncErr) get() error { e.Lock() defer e.Unlock() if e.err != nil { return driver.ErrBadConn } return nil } // Return the error set on the connection. Currently only used by rows.Next. func (e *syncErr) getForNext() error { e.Lock() defer e.Unlock() return e.err } // Set error, only if it isn't set yet. func (e *syncErr) set(err error) { if err == nil { panic("attempt to set nil err") } e.Lock() defer e.Unlock() if e.err == nil { e.err = err } } // Handle driver-side settings in parsed connection string. func (cn *conn) handleDriverSettings(o values) (err error) { boolSetting := func(key string, val *bool) error { if value, ok := o[key]; ok { if value == "yes" { *val = true } else if value == "no" { *val = false } else { return fmt.Errorf("unrecognized value %q for %s", value, key) } } return nil } err = boolSetting("disable_prepared_binary_result", &cn.disablePreparedBinaryResult) if err != nil { return err } return boolSetting("binary_parameters", &cn.binaryParameters) } func (cn *conn) handlePgpass(o values) { // if a password was supplied, do not process .pgpass if _, ok := o["password"]; ok { return } filename := os.Getenv("PGPASSFILE") if filename == "" { // XXX this code doesn't work on Windows where the default filename is // XXX %APPDATA%\postgresql\pgpass.conf // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 userHome := os.Getenv("HOME") if userHome == "" { user, err := user.Current() if err != nil { return } userHome = user.HomeDir } filename = filepath.Join(userHome, ".pgpass") } fileinfo, err := os.Stat(filename) if err != nil { return } mode := fileinfo.Mode() if mode&(0x77) != 0 { // XXX should warn about incorrect .pgpass permissions as psql does return } file, err := os.Open(filename) if err != nil { return } defer file.Close() scanner := bufio.NewScanner(io.Reader(file)) // From: https://github.com/tg/pgpass/blob/master/reader.go for scanner.Scan() { if scanText(scanner.Text(), o) { break } } } // GetFields is a helper function for scanText. func getFields(s string) []string { fs := make([]string, 0, 5) f := make([]rune, 0, len(s)) var esc bool for _, c := range s { switch { case esc: f = append(f, c) esc = false case c == '\\': esc = true case c == ':': fs = append(fs, string(f)) f = f[:0] default: f = append(f, c) } } return append(fs, string(f)) } // ScanText assists HandlePgpass in it's objective. func scanText(line string, o values) bool { hostname := o["host"] ntw, _ := network(o) port := o["port"] db := o["dbname"] username := o["user"] if len(line) == 0 || line[0] == '#' { return false } split := getFields(line) if len(split) != 5 { return false } if (split[0] == "*" || split[0] == hostname || (split[0] == "localhost" && (hostname == "" || ntw == "unix"))) && (split[1] == "*" || split[1] == port) && (split[2] == "*" || split[2] == db) && (split[3] == "*" || split[3] == username) { o["password"] = split[4] return true } return false } func (cn *conn) writeBuf(b byte) *writeBuf { cn.scratch[0] = b return &writeBuf{ buf: cn.scratch[:5], pos: 1, } } // Open opens a new connection to the database. dsn is a connection string. 
// Most users should only use it through database/sql package from the standard // library. func Open(dsn string) (_ driver.Conn, err error) { return DialOpen(defaultDialer{}, dsn) } // DialOpen opens a new connection to the database using a dialer. func DialOpen(d Dialer, dsn string) (_ driver.Conn, err error) { c, err := NewConnector(dsn) if err != nil { return nil, err } c.Dialer(d) return c.open(context.Background()) } func (c *Connector) open(ctx context.Context) (cn *conn, err error) { // Handle any panics during connection initialization. Note that we // specifically do *not* want to use errRecover(), as that would turn any // connection errors into ErrBadConns, hiding the real error message from // the user. defer errRecoverNoErrBadConn(&err) // Create a new values map (copy). This makes it so maps in different // connections do not reference the same underlying data structure, so it // is safe for multiple connections to concurrently write to their opts. o := make(values) for k, v := range c.opts { o[k] = v } cn = &conn{ opts: o, dialer: c.dialer, } err = cn.handleDriverSettings(o) if err != nil { return nil, err } cn.handlePgpass(o) cn.c, err = dial(ctx, c.dialer, o) if err != nil { return nil, err } err = cn.ssl(o) if err != nil { if cn.c != nil { cn.c.Close() } return nil, err } // cn.startup panics on error. Make sure we don't leak cn.c. panicking := true defer func() { if panicking { cn.c.Close() } }() cn.buf = bufio.NewReader(cn.c) cn.startup(o) // reset the deadline, in case one was set (see dial) if timeout, ok := o["connect_timeout"]; ok && timeout != "0" { err = cn.c.SetDeadline(time.Time{}) } panicking = false return cn, err } func dial(ctx context.Context, d Dialer, o values) (net.Conn, error) { network, address := network(o) // Zero or not specified means wait indefinitely. if timeout, ok := o["connect_timeout"]; ok && timeout != "0" { seconds, err := strconv.ParseInt(timeout, 10, 0) if err != nil { return nil, fmt.Errorf("invalid value for parameter connect_timeout: %s", err) } duration := time.Duration(seconds) * time.Second // connect_timeout should apply to the entire connection establishment // procedure, so we both use a timeout for the TCP connection // establishment and set a deadline for doing the initial handshake. // The deadline is then reset after startup() is done. deadline := time.Now().Add(duration) var conn net.Conn if dctx, ok := d.(DialerContext); ok { ctx, cancel := context.WithTimeout(ctx, duration) defer cancel() conn, err = dctx.DialContext(ctx, network, address) } else { conn, err = d.DialTimeout(network, address, duration) } if err != nil { return nil, err } err = conn.SetDeadline(deadline) return conn, err } if dctx, ok := d.(DialerContext); ok { return dctx.DialContext(ctx, network, address) } return d.Dial(network, address) } func network(o values) (string, string) { host := o["host"] if strings.HasPrefix(host, "/") { sockPath := path.Join(host, ".s.PGSQL."+o["port"]) return "unix", sockPath } return "tcp", net.JoinHostPort(host, o["port"]) } type values map[string]string // scanner implements a tokenizer for libpq-style option strings. type scanner struct { s []rune i int } // newScanner returns a new scanner initialized with the option string s. func newScanner(s string) *scanner { return &scanner{[]rune(s), 0} } // Next returns the next rune. // It returns 0, false if the end of the text has been reached. 
func (s *scanner) Next() (rune, bool) { if s.i >= len(s.s) { return 0, false } r := s.s[s.i] s.i++ return r, true } // SkipSpaces returns the next non-whitespace rune. // It returns 0, false if the end of the text has been reached. func (s *scanner) SkipSpaces() (rune, bool) { r, ok := s.Next() for unicode.IsSpace(r) && ok { r, ok = s.Next() } return r, ok } // parseOpts parses the options from name and adds them to the values. // // The parsing code is based on conninfo_parse from libpq's fe-connect.c func parseOpts(name string, o values) error { s := newScanner(name) for { var ( keyRunes, valRunes []rune r rune ok bool ) if r, ok = s.SkipSpaces(); !ok { break } // Scan the key for !unicode.IsSpace(r) && r != '=' { keyRunes = append(keyRunes, r) if r, ok = s.Next(); !ok { break } } // Skip any whitespace if we're not at the = yet if r != '=' { r, ok = s.SkipSpaces() } // The current character should be = if r != '=' || !ok { return fmt.Errorf(`missing "=" after %q in connection info string"`, string(keyRunes)) } // Skip any whitespace after the = if r, ok = s.SkipSpaces(); !ok { // If we reach the end here, the last value is just an empty string as per libpq. o[string(keyRunes)] = "" break } if r != '\'' { for !unicode.IsSpace(r) { if r == '\\' { if r, ok = s.Next(); !ok { return fmt.Errorf(`missing character after backslash`) } } valRunes = append(valRunes, r) if r, ok = s.Next(); !ok { break } } } else { quote: for { if r, ok = s.Next(); !ok { return fmt.Errorf(`unterminated quoted string literal in connection string`) } switch r { case '\'': break quote case '\\': r, _ = s.Next() fallthrough default: valRunes = append(valRunes, r) } } } o[string(keyRunes)] = string(valRunes) } return nil } func (cn *conn) isInTransaction() bool { return cn.txnStatus == txnStatusIdleInTransaction || cn.txnStatus == txnStatusInFailedTransaction } func (cn *conn) checkIsInTransaction(intxn bool) { if cn.isInTransaction() != intxn { cn.err.set(driver.ErrBadConn) errorf("unexpected transaction status %v", cn.txnStatus) } } func (cn *conn) Begin() (_ driver.Tx, err error) { return cn.begin("") } func (cn *conn) begin(mode string) (_ driver.Tx, err error) { if err := cn.err.get(); err != nil { return nil, err } defer cn.errRecover(&err) cn.checkIsInTransaction(false) _, commandTag, err := cn.simpleExec("BEGIN" + mode) if err != nil { return nil, err } if commandTag != "BEGIN" { cn.err.set(driver.ErrBadConn) return nil, fmt.Errorf("unexpected command tag %s", commandTag) } if cn.txnStatus != txnStatusIdleInTransaction { cn.err.set(driver.ErrBadConn) return nil, fmt.Errorf("unexpected transaction status %v", cn.txnStatus) } return cn, nil } func (cn *conn) closeTxn() { if finish := cn.txnFinish; finish != nil { finish() } } func (cn *conn) Commit() (err error) { defer cn.closeTxn() if err := cn.err.get(); err != nil { return err } defer cn.errRecover(&err) cn.checkIsInTransaction(true) // We don't want the client to think that everything is okay if it tries // to commit a failed transaction. However, no matter what we return, // database/sql will release this connection back into the free connection // pool so we have to abort the current transaction here. Note that you // would get the same behaviour if you issued a COMMIT in a failed // transaction, so it's also the least surprising thing to do here. 
if cn.txnStatus == txnStatusInFailedTransaction { if err := cn.rollback(); err != nil { return err } return ErrInFailedTransaction } _, commandTag, err := cn.simpleExec("COMMIT") if err != nil { if cn.isInTransaction() { cn.err.set(driver.ErrBadConn) } return err } if commandTag != "COMMIT" { cn.err.set(driver.ErrBadConn) return fmt.Errorf("unexpected command tag %s", commandTag) } cn.checkIsInTransaction(false) return nil } func (cn *conn) Rollback() (err error) { defer cn.closeTxn() if err := cn.err.get(); err != nil { return err } defer cn.errRecover(&err) return cn.rollback() } func (cn *conn) rollback() (err error) { cn.checkIsInTransaction(true) _, commandTag, err := cn.simpleExec("ROLLBACK") if err != nil { if cn.isInTransaction() { cn.err.set(driver.ErrBadConn) } return err } if commandTag != "ROLLBACK" { return fmt.Errorf("unexpected command tag %s", commandTag) } cn.checkIsInTransaction(false) return nil } func (cn *conn) gname() string { cn.namei++ return strconv.FormatInt(int64(cn.namei), 10) } func (cn *conn) simpleExec(q string) (res driver.Result, commandTag string, err error) { b := cn.writeBuf('Q') b.string(q) cn.send(b) for { t, r := cn.recv1() switch t { case 'C': res, commandTag = cn.parseComplete(r.string()) case 'Z': cn.processReadyForQuery(r) if res == nil && err == nil { err = errUnexpectedReady } // done return case 'E': err = parseError(r) case 'I': res = emptyRows case 'T', 'D': // ignore any results default: cn.err.set(driver.ErrBadConn) errorf("unknown response for simple query: %q", t) } } } func (cn *conn) simpleQuery(q string) (res *rows, err error) { defer cn.errRecover(&err) b := cn.writeBuf('Q') b.string(q) cn.send(b) for { t, r := cn.recv1() switch t { case 'C', 'I': // We allow queries which don't return any results through Query as // well as Exec. We still have to give database/sql a rows object // the user can close, though, to avoid connections from being // leaked. A "rows" with done=true works fine for that purpose. if err != nil { cn.err.set(driver.ErrBadConn) errorf("unexpected message %q in simple query execution", t) } if res == nil { res = &rows{ cn: cn, } } // Set the result and tag to the last command complete if there wasn't a // query already run. Although queries usually return from here and cede // control to Next, a query with zero results does not. if t == 'C' { res.result, res.tag = cn.parseComplete(r.string()) if res.colNames != nil { return } } res.done = true case 'Z': cn.processReadyForQuery(r) // done return case 'E': res = nil err = parseError(r) case 'D': if res == nil { cn.err.set(driver.ErrBadConn) errorf("unexpected DataRow in simple query execution") } // the query didn't fail; kick off to Next cn.saveMessage(t, r) return case 'T': // res might be non-nil here if we received a previous // CommandComplete, but that's fine; just overwrite it res = &rows{cn: cn} res.rowsHeader = parsePortalRowDescribe(r) // To work around a bug in QueryRow in Go 1.2 and earlier, wait // until the first DataRow has been received. default: cn.err.set(driver.ErrBadConn) errorf("unknown response for simple query: %q", t) } } } type noRows struct{} var emptyRows noRows var _ driver.Result = noRows{} func (noRows) LastInsertId() (int64, error) { return 0, errNoLastInsertID } func (noRows) RowsAffected() (int64, error) { return 0, errNoRowsAffected } // Decides which column formats to use for a prepared statement. The input is // an array of type oids, one element per result column. 
func decideColumnFormats( colTyps []fieldDesc, forceText bool, ) (colFmts []format, colFmtData []byte) { if len(colTyps) == 0 { return nil, colFmtDataAllText } colFmts = make([]format, len(colTyps)) if forceText { return colFmts, colFmtDataAllText } allBinary := true allText := true for i, t := range colTyps { switch t.OID { // This is the list of types to use binary mode for when receiving them // through a prepared statement. If a type appears in this list, it // must also be implemented in binaryDecode in encode.go. case oid.T_bytea: fallthrough case oid.T_int8: fallthrough case oid.T_int4: fallthrough case oid.T_int2: fallthrough case oid.T_uuid: colFmts[i] = formatBinary allText = false default: allBinary = false } } if allBinary { return colFmts, colFmtDataAllBinary } else if allText { return colFmts, colFmtDataAllText } else { colFmtData = make([]byte, 2+len(colFmts)*2) binary.BigEndian.PutUint16(colFmtData, uint16(len(colFmts))) for i, v := range colFmts { binary.BigEndian.PutUint16(colFmtData[2+i*2:], uint16(v)) } return colFmts, colFmtData } } func (cn *conn) prepareTo(q, stmtName string) *stmt { st := &stmt{cn: cn, name: stmtName} b := cn.writeBuf('P') b.string(st.name) b.string(q) b.int16(0) b.next('D') b.byte('S') b.string(st.name) b.next('S') cn.send(b) cn.readParseResponse() st.paramTyps, st.colNames, st.colTyps = cn.readStatementDescribeResponse() st.colFmts, st.colFmtData = decideColumnFormats(st.colTyps, cn.disablePreparedBinaryResult) cn.readReadyForQuery() return st } func (cn *conn) Prepare(q string) (_ driver.Stmt, err error) { if err := cn.err.get(); err != nil { return nil, err } defer cn.errRecover(&err) if len(q) >= 4 && strings.EqualFold(q[:4], "COPY") { s, err := cn.prepareCopyIn(q) if err == nil { cn.inCopy = true } return s, err } return cn.prepareTo(q, cn.gname()), nil } func (cn *conn) Close() (err error) { // Skip cn.bad return here because we always want to close a connection. defer cn.errRecover(&err) // Ensure that cn.c.Close is always run. Since error handling is done with // panics and cn.errRecover, the Close must be in a defer. defer func() { cerr := cn.c.Close() if err == nil { err = cerr } }() // Don't go through send(); ListenerConn relies on us not scribbling on the // scratch buffer of this connection. 
return cn.sendSimpleMessage('X') } // Implement the "Queryer" interface func (cn *conn) Query(query string, args []driver.Value) (driver.Rows, error) { return cn.query(query, args) } func (cn *conn) query(query string, args []driver.Value) (_ *rows, err error) { if err := cn.err.get(); err != nil { return nil, err } if cn.inCopy { return nil, errCopyInProgress } defer cn.errRecover(&err) // Check to see if we can use the "simpleQuery" interface, which is // *much* faster than going through prepare/exec if len(args) == 0 { return cn.simpleQuery(query) } if cn.binaryParameters { cn.sendBinaryModeQuery(query, args) cn.readParseResponse() cn.readBindResponse() rows := &rows{cn: cn} rows.rowsHeader = cn.readPortalDescribeResponse() cn.postExecuteWorkaround() return rows, nil } st := cn.prepareTo(query, "") st.exec(args) return &rows{ cn: cn, rowsHeader: st.rowsHeader, }, nil } // Implement the optional "Execer" interface for one-shot queries func (cn *conn) Exec(query string, args []driver.Value) (res driver.Result, err error) { if err := cn.err.get(); err != nil { return nil, err } defer cn.errRecover(&err) // Check to see if we can use the "simpleExec" interface, which is // *much* faster than going through prepare/exec if len(args) == 0 { // ignore commandTag, our caller doesn't care r, _, err := cn.simpleExec(query) return r, err } if cn.binaryParameters { cn.sendBinaryModeQuery(query, args) cn.readParseResponse() cn.readBindResponse() cn.readPortalDescribeResponse() cn.postExecuteWorkaround() res, _, err = cn.readExecuteResponse("Execute") return res, err } // Use the unnamed statement to defer planning until bind // time, or else value-based selectivity estimates cannot be // used. st := cn.prepareTo(query, "") r, err := st.Exec(args) if err != nil { panic(err) } return r, err } type safeRetryError struct { Err error } func (se *safeRetryError) Error() string { return se.Err.Error() } func (cn *conn) send(m *writeBuf) { n, err := cn.c.Write(m.wrap()) if err != nil { if n == 0 { err = &safeRetryError{Err: err} } panic(err) } } func (cn *conn) sendStartupPacket(m *writeBuf) error { _, err := cn.c.Write((m.wrap())[1:]) return err } // Send a message of type typ to the server on the other end of cn. The // message should have no payload. This method does not use the scratch // buffer. func (cn *conn) sendSimpleMessage(typ byte) (err error) { _, err = cn.c.Write([]byte{typ, '\x00', '\x00', '\x00', '\x04'}) return err } // saveMessage memorizes a message and its buffer in the conn struct. // recvMessage will then return these values on the next call to it. This // method is useful in cases where you have to see what the next message is // going to be (e.g. to see whether it's an error or not) but you can't handle // the message yourself. func (cn *conn) saveMessage(typ byte, buf *readBuf) { if cn.saveMessageType != 0 { cn.err.set(driver.ErrBadConn) errorf("unexpected saveMessageType %d", cn.saveMessageType) } cn.saveMessageType = typ cn.saveMessageBuffer = *buf } // recvMessage receives any message from the backend, or returns an error if // a problem occurred while reading the message. 
func (cn *conn) recvMessage(r *readBuf) (byte, error) { // workaround for a QueryRow bug, see exec if cn.saveMessageType != 0 { t := cn.saveMessageType *r = cn.saveMessageBuffer cn.saveMessageType = 0 cn.saveMessageBuffer = nil return t, nil } x := cn.scratch[:5] _, err := io.ReadFull(cn.buf, x) if err != nil { return 0, err } // read the type and length of the message that follows t := x[0] n := int(binary.BigEndian.Uint32(x[1:])) - 4 var y []byte if n <= len(cn.scratch) { y = cn.scratch[:n] } else { y = make([]byte, n) } _, err = io.ReadFull(cn.buf, y) if err != nil { return 0, err } *r = y return t, nil } // recv receives a message from the backend, but if an error happened while // reading the message or the received message was an ErrorResponse, it panics. // NoticeResponses are ignored. This function should generally be used only // during the startup sequence. func (cn *conn) recv() (t byte, r *readBuf) { for { var err error r = &readBuf{} t, err = cn.recvMessage(r) if err != nil { panic(err) } switch t { case 'E': panic(parseError(r)) case 'N': if n := cn.noticeHandler; n != nil { n(parseError(r)) } case 'A': if n := cn.notificationHandler; n != nil { n(recvNotification(r)) } default: return } } } // recv1Buf is exactly equivalent to recv1, except it uses a buffer supplied by // the caller to avoid an allocation. func (cn *conn) recv1Buf(r *readBuf) byte { for { t, err := cn.recvMessage(r) if err != nil { panic(err) } switch t { case 'A': if n := cn.notificationHandler; n != nil { n(recvNotification(r)) } case 'N': if n := cn.noticeHandler; n != nil { n(parseError(r)) } case 'S': cn.processParameterStatus(r) default: return t } } } // recv1 receives a message from the backend, panicking if an error occurs // while attempting to read it. All asynchronous messages are ignored, with // the exception of ErrorResponse. func (cn *conn) recv1() (t byte, r *readBuf) { r = &readBuf{} t = cn.recv1Buf(r) return t, r } func (cn *conn) ssl(o values) error { upgrade, err := ssl(o) if err != nil { return err } if upgrade == nil { // Nothing to do return nil } w := cn.writeBuf(0) w.int32(80877103) if err = cn.sendStartupPacket(w); err != nil { return err } b := cn.scratch[:1] _, err = io.ReadFull(cn.c, b) if err != nil { return err } if b[0] != 'S' { return ErrSSLNotSupported } cn.c, err = upgrade(cn.c) return err } // isDriverSetting returns true iff a setting is purely for configuring the // driver's options and should not be sent to the server in the connection // startup packet. func isDriverSetting(key string) bool { switch key { case "host", "port": return true case "password": return true case "sslmode", "sslcert", "sslkey", "sslrootcert", "sslinline", "sslsni": return true case "fallback_application_name": return true case "connect_timeout": return true case "disable_prepared_binary_result": return true case "binary_parameters": return true case "krbsrvname": return true case "krbspn": return true default: return false } } func (cn *conn) startup(o values) { w := cn.writeBuf(0) w.int32(196608) // Send the backend the name of the database we want to connect to, and the // user we want to connect as. Additionally, we send over any run-time // parameters potentially included in the connection string. If the server // doesn't recognize any of them, it will reply with an error. for k, v := range o { if isDriverSetting(k) { // skip options which can't be run-time parameters continue } // The protocol requires us to supply the database name as "database" // instead of "dbname". 
if k == "dbname" { k = "database" } w.string(k) w.string(v) } w.string("") if err := cn.sendStartupPacket(w); err != nil { panic(err) } for { t, r := cn.recv() switch t { case 'K': cn.processBackendKeyData(r) case 'S': cn.processParameterStatus(r) case 'R': cn.auth(r, o) case 'Z': cn.processReadyForQuery(r) return default: errorf("unknown response for startup: %q", t) } } } func (cn *conn) auth(r *readBuf, o values) { switch code := r.int32(); code { case 0: // OK case 3: w := cn.writeBuf('p') w.string(o["password"]) cn.send(w) t, r := cn.recv() if t != 'R' { errorf("unexpected password response: %q", t) } if r.int32() != 0 { errorf("unexpected authentication response: %q", t) } case 5: s := string(r.next(4)) w := cn.writeBuf('p') w.string("md5" + md5s(md5s(o["password"]+o["user"])+s)) cn.send(w) t, r := cn.recv() if t != 'R' { errorf("unexpected password response: %q", t) } if r.int32() != 0 { errorf("unexpected authentication response: %q", t) } case 7: // GSSAPI, startup if newGss == nil { errorf("kerberos error: no GSSAPI provider registered (import github.com/lib/pq/auth/kerberos if you need Kerberos support)") } cli, err := newGss() if err != nil { errorf("kerberos error: %s", err.Error()) } var token []byte if spn, ok := o["krbspn"]; ok { // Use the supplied SPN if provided.. token, err = cli.GetInitTokenFromSpn(spn) } else { // Allow the kerberos service name to be overridden service := "postgres" if val, ok := o["krbsrvname"]; ok { service = val } token, err = cli.GetInitToken(o["host"], service) } if err != nil { errorf("failed to get Kerberos ticket: %q", err) } w := cn.writeBuf('p') w.bytes(token) cn.send(w) // Store for GSSAPI continue message cn.gss = cli case 8: // GSSAPI continue if cn.gss == nil { errorf("GSSAPI protocol error") } b := []byte(*r) done, tokOut, err := cn.gss.Continue(b) if err == nil && !done { w := cn.writeBuf('p') w.bytes(tokOut) cn.send(w) } // Errors fall through and read the more detailed message // from the server.. case 10: sc := scram.NewClient(sha256.New, o["user"], o["password"]) sc.Step(nil) if sc.Err() != nil { errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) } scOut := sc.Out() w := cn.writeBuf('p') w.string("SCRAM-SHA-256") w.int32(len(scOut)) w.bytes(scOut) cn.send(w) t, r := cn.recv() if t != 'R' { errorf("unexpected password response: %q", t) } if r.int32() != 11 { errorf("unexpected authentication response: %q", t) } nextStep := r.next(len(*r)) sc.Step(nextStep) if sc.Err() != nil { errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) } scOut = sc.Out() w = cn.writeBuf('p') w.bytes(scOut) cn.send(w) t, r = cn.recv() if t != 'R' { errorf("unexpected password response: %q", t) } if r.int32() != 12 { errorf("unexpected authentication response: %q", t) } nextStep = r.next(len(*r)) sc.Step(nextStep) if sc.Err() != nil { errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) } default: errorf("unknown authentication response: %d", code) } } type format int const formatText format = 0 const formatBinary format = 1 // One result-column format code with the value 1 (i.e. all binary). var colFmtDataAllBinary = []byte{0, 1, 0, 1} // No result-column format codes (i.e. all text). 
var colFmtDataAllText = []byte{0, 0} type stmt struct { cn *conn name string rowsHeader colFmtData []byte paramTyps []oid.Oid closed bool } func (st *stmt) Close() (err error) { if st.closed { return nil } if err := st.cn.err.get(); err != nil { return err } defer st.cn.errRecover(&err) w := st.cn.writeBuf('C') w.byte('S') w.string(st.name) st.cn.send(w) st.cn.send(st.cn.writeBuf('S')) t, _ := st.cn.recv1() if t != '3' { st.cn.err.set(driver.ErrBadConn) errorf("unexpected close response: %q", t) } st.closed = true t, r := st.cn.recv1() if t != 'Z' { st.cn.err.set(driver.ErrBadConn) errorf("expected ready for query, but got: %q", t) } st.cn.processReadyForQuery(r) return nil } func (st *stmt) Query(v []driver.Value) (r driver.Rows, err error) { return st.query(v) } func (st *stmt) query(v []driver.Value) (r *rows, err error) { if err := st.cn.err.get(); err != nil { return nil, err } defer st.cn.errRecover(&err) st.exec(v) return &rows{ cn: st.cn, rowsHeader: st.rowsHeader, }, nil } func (st *stmt) Exec(v []driver.Value) (res driver.Result, err error) { if err := st.cn.err.get(); err != nil { return nil, err } defer st.cn.errRecover(&err) st.exec(v) res, _, err = st.cn.readExecuteResponse("simple query") return res, err } func (st *stmt) exec(v []driver.Value) { if len(v) >= 65536 { errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(v)) } if len(v) != len(st.paramTyps) { errorf("got %d parameters but the statement requires %d", len(v), len(st.paramTyps)) } cn := st.cn w := cn.writeBuf('B') w.byte(0) // unnamed portal w.string(st.name) if cn.binaryParameters { cn.sendBinaryParameters(w, v) } else { w.int16(0) w.int16(len(v)) for i, x := range v { if x == nil { w.int32(-1) } else { b := encode(&cn.parameterStatus, x, st.paramTyps[i]) w.int32(len(b)) w.bytes(b) } } } w.bytes(st.colFmtData) w.next('E') w.byte(0) w.int32(0) w.next('S') cn.send(w) cn.readBindResponse() cn.postExecuteWorkaround() } func (st *stmt) NumInput() int { return len(st.paramTyps) } // parseComplete parses the "command tag" from a CommandComplete message, and // returns the number of rows affected (if applicable) and a string // identifying only the command that was executed, e.g. "ALTER TABLE". If the // command tag could not be parsed, parseComplete panics. func (cn *conn) parseComplete(commandTag string) (driver.Result, string) { commandsWithAffectedRows := []string{ "SELECT ", // INSERT is handled below "UPDATE ", "DELETE ", "FETCH ", "MOVE ", "COPY ", } var affectedRows *string for _, tag := range commandsWithAffectedRows { if strings.HasPrefix(commandTag, tag) { t := commandTag[len(tag):] affectedRows = &t commandTag = tag[:len(tag)-1] break } } // INSERT also includes the oid of the inserted row in its command tag. // Oids in user tables are deprecated, and the oid is only returned when // exactly one row is inserted, so it's unlikely to be of value to any // real-world application and we can ignore it. 
if affectedRows == nil && strings.HasPrefix(commandTag, "INSERT ") { parts := strings.Split(commandTag, " ") if len(parts) != 3 { cn.err.set(driver.ErrBadConn) errorf("unexpected INSERT command tag %s", commandTag) } affectedRows = &parts[len(parts)-1] commandTag = "INSERT" } // There should be no affected rows attached to the tag, just return it if affectedRows == nil { return driver.RowsAffected(0), commandTag } n, err := strconv.ParseInt(*affectedRows, 10, 64) if err != nil { cn.err.set(driver.ErrBadConn) errorf("could not parse commandTag: %s", err) } return driver.RowsAffected(n), commandTag } type rowsHeader struct { colNames []string colTyps []fieldDesc colFmts []format } type rows struct { cn *conn finish func() rowsHeader done bool rb readBuf result driver.Result tag string next *rowsHeader } func (rs *rows) Close() error { if finish := rs.finish; finish != nil { defer finish() } // no need to look at cn.bad as Next() will for { err := rs.Next(nil) switch err { case nil: case io.EOF: // rs.Next can return io.EOF on both 'Z' (ready for query) and 'T' (row // description, used with HasNextResultSet). We need to fetch messages until // we hit a 'Z', which is done by waiting for done to be set. if rs.done { return nil } default: return err } } } func (rs *rows) Columns() []string { return rs.colNames } func (rs *rows) Result() driver.Result { if rs.result == nil { return emptyRows } return rs.result } func (rs *rows) Tag() string { return rs.tag } func (rs *rows) Next(dest []driver.Value) (err error) { if rs.done { return io.EOF } conn := rs.cn if err := conn.err.getForNext(); err != nil { return err } defer conn.errRecover(&err) for { t := conn.recv1Buf(&rs.rb) switch t { case 'E': err = parseError(&rs.rb) case 'C', 'I': if t == 'C' { rs.result, rs.tag = conn.parseComplete(rs.rb.string()) } continue case 'Z': conn.processReadyForQuery(&rs.rb) rs.done = true if err != nil { return err } return io.EOF case 'D': n := rs.rb.int16() if err != nil { conn.err.set(driver.ErrBadConn) errorf("unexpected DataRow after error %s", err) } if n < len(dest) { dest = dest[:n] } for i := range dest { l := rs.rb.int32() if l == -1 { dest[i] = nil continue } dest[i] = decode(&conn.parameterStatus, rs.rb.next(l), rs.colTyps[i].OID, rs.colFmts[i]) } return case 'T': next := parsePortalRowDescribe(&rs.rb) rs.next = &next return io.EOF default: errorf("unexpected message after execute: %q", t) } } } func (rs *rows) HasNextResultSet() bool { hasNext := rs.next != nil && !rs.done return hasNext } func (rs *rows) NextResultSet() error { if rs.next == nil { return io.EOF } rs.rowsHeader = *rs.next rs.next = nil return nil } // QuoteIdentifier quotes an "identifier" (e.g. a table or a column name) to be // used as part of an SQL statement. For example: // // tblname := "my_table" // data := "my_data" // quoted := pq.QuoteIdentifier(tblname) // err := db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", quoted), data) // // Any double quotes in name will be escaped. The quoted identifier will be // case sensitive when used in a query. If the input string contains a zero // byte, the result will be truncated immediately before it. func QuoteIdentifier(name string) string { end := strings.IndexRune(name, 0) if end > -1 { name = name[:end] } return `"` + strings.Replace(name, `"`, `""`, -1) + `"` } // BufferQuoteIdentifier satisfies the same purpose as QuoteIdentifier, but backed by a // byte buffer. 
func BufferQuoteIdentifier(name string, buffer *bytes.Buffer) { end := strings.IndexRune(name, 0) if end > -1 { name = name[:end] } buffer.WriteRune('"') buffer.WriteString(strings.Replace(name, `"`, `""`, -1)) buffer.WriteRune('"') } // QuoteLiteral quotes a 'literal' (e.g. a parameter, often used to pass literal // to DDL and other statements that do not accept parameters) to be used as part // of an SQL statement. For example: // // exp_date := pq.QuoteLiteral("2023-01-05 15:00:00Z") // err := db.Exec(fmt.Sprintf("CREATE ROLE my_user VALID UNTIL %s", exp_date)) // // Any single quotes in name will be escaped. Any backslashes (i.e. "\") will be // replaced by two backslashes (i.e. "\\") and the C-style escape identifier // that PostgreSQL provides ('E') will be prepended to the string. func QuoteLiteral(literal string) string { // This follows the PostgreSQL internal algorithm for handling quoted literals // from libpq, which can be found in the "PQEscapeStringInternal" function, // which is found in the libpq/fe-exec.c source file: // https://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/interfaces/libpq/fe-exec.c // // substitute any single-quotes (') with two single-quotes ('') literal = strings.Replace(literal, `'`, `''`, -1) // determine if the string has any backslashes (\) in it. // if it does, replace any backslashes (\) with two backslashes (\\) // then, we need to wrap the entire string with a PostgreSQL // C-style escape. Per how "PQEscapeStringInternal" handles this case, we // also add a space before the "E" if strings.Contains(literal, `\`) { literal = strings.Replace(literal, `\`, `\\`, -1) literal = ` E'` + literal + `'` } else { // otherwise, we can just wrap the literal with a pair of single quotes literal = `'` + literal + `'` } return literal } func md5s(s string) string { h := md5.New() h.Write([]byte(s)) return fmt.Sprintf("%x", h.Sum(nil)) } func (cn *conn) sendBinaryParameters(b *writeBuf, args []driver.Value) { // Do one pass over the parameters to see if we're going to send any of // them over in binary. If we are, create a paramFormats array at the // same time. 
var paramFormats []int for i, x := range args { _, ok := x.([]byte) if ok { if paramFormats == nil { paramFormats = make([]int, len(args)) } paramFormats[i] = 1 } } if paramFormats == nil { b.int16(0) } else { b.int16(len(paramFormats)) for _, x := range paramFormats { b.int16(x) } } b.int16(len(args)) for _, x := range args { if x == nil { b.int32(-1) } else { datum := binaryEncode(&cn.parameterStatus, x) b.int32(len(datum)) b.bytes(datum) } } } func (cn *conn) sendBinaryModeQuery(query string, args []driver.Value) { if len(args) >= 65536 { errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(args)) } b := cn.writeBuf('P') b.byte(0) // unnamed statement b.string(query) b.int16(0) b.next('B') b.int16(0) // unnamed portal and statement cn.sendBinaryParameters(b, args) b.bytes(colFmtDataAllText) b.next('D') b.byte('P') b.byte(0) // unnamed portal b.next('E') b.byte(0) b.int32(0) b.next('S') cn.send(b) } func (cn *conn) processParameterStatus(r *readBuf) { var err error param := r.string() switch param { case "server_version": var major1 int var major2 int _, err = fmt.Sscanf(r.string(), "%d.%d", &major1, &major2) if err == nil { cn.parameterStatus.serverVersion = major1*10000 + major2*100 } case "TimeZone": cn.parameterStatus.currentLocation, err = time.LoadLocation(r.string()) if err != nil { cn.parameterStatus.currentLocation = nil } default: // ignore } } func (cn *conn) processReadyForQuery(r *readBuf) { cn.txnStatus = transactionStatus(r.byte()) } func (cn *conn) readReadyForQuery() { t, r := cn.recv1() switch t { case 'Z': cn.processReadyForQuery(r) return default: cn.err.set(driver.ErrBadConn) errorf("unexpected message %q; expected ReadyForQuery", t) } } func (cn *conn) processBackendKeyData(r *readBuf) { cn.processID = r.int32() cn.secretKey = r.int32() } func (cn *conn) readParseResponse() { t, r := cn.recv1() switch t { case '1': return case 'E': err := parseError(r) cn.readReadyForQuery() panic(err) default: cn.err.set(driver.ErrBadConn) errorf("unexpected Parse response %q", t) } } func (cn *conn) readStatementDescribeResponse() ( paramTyps []oid.Oid, colNames []string, colTyps []fieldDesc, ) { for { t, r := cn.recv1() switch t { case 't': nparams := r.int16() paramTyps = make([]oid.Oid, nparams) for i := range paramTyps { paramTyps[i] = r.oid() } case 'n': return paramTyps, nil, nil case 'T': colNames, colTyps = parseStatementRowDescribe(r) return paramTyps, colNames, colTyps case 'E': err := parseError(r) cn.readReadyForQuery() panic(err) default: cn.err.set(driver.ErrBadConn) errorf("unexpected Describe statement response %q", t) } } } func (cn *conn) readPortalDescribeResponse() rowsHeader { t, r := cn.recv1() switch t { case 'T': return parsePortalRowDescribe(r) case 'n': return rowsHeader{} case 'E': err := parseError(r) cn.readReadyForQuery() panic(err) default: cn.err.set(driver.ErrBadConn) errorf("unexpected Describe response %q", t) } panic("not reached") } func (cn *conn) readBindResponse() { t, r := cn.recv1() switch t { case '2': return case 'E': err := parseError(r) cn.readReadyForQuery() panic(err) default: cn.err.set(driver.ErrBadConn) errorf("unexpected Bind response %q", t) } } func (cn *conn) postExecuteWorkaround() { // Work around a bug in sql.DB.QueryRow: in Go 1.2 and earlier it ignores // any errors from rows.Next, which masks errors that happened during the // execution of the query. To avoid the problem in common cases, we wait // here for one more message from the database. 
If it's not an error the // query will likely succeed (or perhaps has already, if it's a // CommandComplete), so we push the message into the conn struct; recv1 // will return it as the next message for rows.Next or rows.Close. // However, if it's an error, we wait until ReadyForQuery and then return // the error to our caller. for { t, r := cn.recv1() switch t { case 'E': err := parseError(r) cn.readReadyForQuery() panic(err) case 'C', 'D', 'I': // the query didn't fail, but we can't process this message cn.saveMessage(t, r) return default: cn.err.set(driver.ErrBadConn) errorf("unexpected message during extended query execution: %q", t) } } } // Only for Exec(), since we ignore the returned data func (cn *conn) readExecuteResponse( protocolState string, ) (res driver.Result, commandTag string, err error) { for { t, r := cn.recv1() switch t { case 'C': if err != nil { cn.err.set(driver.ErrBadConn) errorf("unexpected CommandComplete after error %s", err) } res, commandTag = cn.parseComplete(r.string()) case 'Z': cn.processReadyForQuery(r) if res == nil && err == nil { err = errUnexpectedReady } return res, commandTag, err case 'E': err = parseError(r) case 'T', 'D', 'I': if err != nil { cn.err.set(driver.ErrBadConn) errorf("unexpected %q after error %s", t, err) } if t == 'I' { res = emptyRows } // ignore any results default: cn.err.set(driver.ErrBadConn) errorf("unknown %s response: %q", protocolState, t) } } } func parseStatementRowDescribe(r *readBuf) (colNames []string, colTyps []fieldDesc) { n := r.int16() colNames = make([]string, n) colTyps = make([]fieldDesc, n) for i := range colNames { colNames[i] = r.string() r.next(6) colTyps[i].OID = r.oid() colTyps[i].Len = r.int16() colTyps[i].Mod = r.int32() // format code not known when describing a statement; always 0 r.next(2) } return } func parsePortalRowDescribe(r *readBuf) rowsHeader { n := r.int16() colNames := make([]string, n) colFmts := make([]format, n) colTyps := make([]fieldDesc, n) for i := range colNames { colNames[i] = r.string() r.next(6) colTyps[i].OID = r.oid() colTyps[i].Len = r.int16() colTyps[i].Mod = r.int32() colFmts[i] = format(r.int16()) } return rowsHeader{ colNames: colNames, colFmts: colFmts, colTyps: colTyps, } } // parseEnviron tries to mimic some of libpq's environment handling // // To ease testing, it does not directly reference os.Environ, but is // designed to accept its output. // // Environment-set connection information is intended to have a higher // precedence than a library default but lower than any explicitly // passed information (such as in the URL or connection string). func parseEnviron(env []string) (out map[string]string) { out = make(map[string]string) for _, v := range env { parts := strings.SplitN(v, "=", 2) accrue := func(keyname string) { out[keyname] = parts[1] } unsupported := func() { panic(fmt.Sprintf("setting %v not supported", parts[0])) } // The order of these is the same as is seen in the // PostgreSQL 9.1 manual. Unsupported but well-defined // keys cause a panic; these should be unset prior to // execution. Options which pq expects to be set to a // certain value are allowed, but must be set to that // value if present (they can, of course, be absent). 
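// For example (hypothetical values), an environment containing
// "PGHOST=localhost" and "PGAPPNAME=myapp" maps to
// {"host": "localhost", "application_name": "myapp"}.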
switch parts[0] { case "PGHOST": accrue("host") case "PGHOSTADDR": unsupported() case "PGPORT": accrue("port") case "PGDATABASE": accrue("dbname") case "PGUSER": accrue("user") case "PGPASSWORD": accrue("password") case "PGSERVICE", "PGSERVICEFILE", "PGREALM": unsupported() case "PGOPTIONS": accrue("options") case "PGAPPNAME": accrue("application_name") case "PGSSLMODE": accrue("sslmode") case "PGSSLCERT": accrue("sslcert") case "PGSSLKEY": accrue("sslkey") case "PGSSLROOTCERT": accrue("sslrootcert") case "PGSSLSNI": accrue("sslsni") case "PGREQUIRESSL", "PGSSLCRL": unsupported() case "PGREQUIREPEER": unsupported() case "PGKRBSRVNAME", "PGGSSLIB": unsupported() case "PGCONNECT_TIMEOUT": accrue("connect_timeout") case "PGCLIENTENCODING": accrue("client_encoding") case "PGDATESTYLE": accrue("datestyle") case "PGTZ": accrue("timezone") case "PGGEQO": accrue("geqo") case "PGSYSCONFDIR", "PGLOCALEDIR": unsupported() } } return out } // isUTF8 returns whether name is a fuzzy variation of the string "UTF-8". func isUTF8(name string) bool { // Recognize all sorts of silly things as "UTF-8", like Postgres does s := strings.Map(alnumLowerASCII, name) return s == "utf8" || s == "unicode" } func alnumLowerASCII(ch rune) rune { if 'A' <= ch && ch <= 'Z' { return ch + ('a' - 'A') } if 'a' <= ch && ch <= 'z' || '0' <= ch && ch <= '9' { return ch } return -1 // discard } // The database/sql/driver package says: // All Conn implementations should implement the following interfaces: Pinger, SessionResetter, and Validator. var _ driver.Pinger = &conn{} var _ driver.SessionResetter = &conn{} func (cn *conn) ResetSession(ctx context.Context) error { // Ensure bad connections are reported: From database/sql/driver: // If a connection is never returned to the connection pool but immediately reused, then // ResetSession is called prior to reuse but IsValid is not called. 
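// cn.err records any sticky connection-level error (for example after a
// canceled query); reporting it here is what lets the pool discard a broken
// connection instead of handing it out again.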
return cn.err.get() } func (cn *conn) IsValid() bool { return cn.err.get() == nil } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/array_test.go0000644000000000000000000012172215024302467022440 0ustar rootrootpackage pq import ( "bytes" "database/sql" "database/sql/driver" "math/rand" "reflect" "strings" "testing" ) func TestParseArray(t *testing.T) { for _, tt := range []struct { input string delim string dims []int elems [][]byte }{ {`{}`, `,`, nil, [][]byte{}}, {`{NULL}`, `,`, []int{1}, [][]byte{nil}}, {`{a}`, `,`, []int{1}, [][]byte{{'a'}}}, {`{a,b}`, `,`, []int{2}, [][]byte{{'a'}, {'b'}}}, {`{{a,b}}`, `,`, []int{1, 2}, [][]byte{{'a'}, {'b'}}}, {`{{a},{b}}`, `,`, []int{2, 1}, [][]byte{{'a'}, {'b'}}}, {`{{{a,b},{c,d},{e,f}}}`, `,`, []int{1, 3, 2}, [][]byte{ {'a'}, {'b'}, {'c'}, {'d'}, {'e'}, {'f'}, }}, {`{""}`, `,`, []int{1}, [][]byte{{}}}, {`{","}`, `,`, []int{1}, [][]byte{{','}}}, {`{",",","}`, `,`, []int{2}, [][]byte{{','}, {','}}}, {`{{",",","}}`, `,`, []int{1, 2}, [][]byte{{','}, {','}}}, {`{{","},{","}}`, `,`, []int{2, 1}, [][]byte{{','}, {','}}}, {`{{{",",","},{",",","},{",",","}}}`, `,`, []int{1, 3, 2}, [][]byte{ {','}, {','}, {','}, {','}, {','}, {','}, }}, {`{"\"}"}`, `,`, []int{1}, [][]byte{{'"', '}'}}}, {`{"\"","\""}`, `,`, []int{2}, [][]byte{{'"'}, {'"'}}}, {`{{"\"","\""}}`, `,`, []int{1, 2}, [][]byte{{'"'}, {'"'}}}, {`{{"\""},{"\""}}`, `,`, []int{2, 1}, [][]byte{{'"'}, {'"'}}}, {`{{{"\"","\""},{"\"","\""},{"\"","\""}}}`, `,`, []int{1, 3, 2}, [][]byte{ {'"'}, {'"'}, {'"'}, {'"'}, {'"'}, {'"'}, }}, {`{axyzb}`, `xyz`, []int{2}, [][]byte{{'a'}, {'b'}}}, } { dims, elems, err := parseArray([]byte(tt.input), []byte(tt.delim)) if err != nil { t.Fatalf("Expected no error for %q, got %q", tt.input, err) } if !reflect.DeepEqual(dims, tt.dims) { t.Errorf("Expected %v dimensions for %q, got %v", tt.dims, tt.input, dims) } if !reflect.DeepEqual(elems, tt.elems) { t.Errorf("Expected %v elements for %q, got %v", tt.elems, tt.input, elems) } } } func TestParseArrayError(t *testing.T) { for _, tt := range []struct { input, err string }{ {``, "expected '{' at offset 0"}, {`x`, "expected '{' at offset 0"}, {`}`, "expected '{' at offset 0"}, {`{`, "expected '}' at offset 1"}, {`{{}`, "expected '}' at offset 3"}, {`{}}`, "unexpected '}' at offset 2"}, {`{,}`, "unexpected ',' at offset 1"}, {`{,x}`, "unexpected ',' at offset 1"}, {`{x,}`, "unexpected '}' at offset 3"}, {`{x,{`, "unexpected '{' at offset 3"}, {`{x},`, "unexpected ',' at offset 3"}, {`{x}}`, "unexpected '}' at offset 3"}, {`{{x}`, "expected '}' at offset 4"}, {`{""x}`, "unexpected 'x' at offset 3"}, {`{{a},{b,c}}`, "multidimensional arrays must have elements with matching dimensions"}, } { _, _, err := parseArray([]byte(tt.input), []byte{','}) if err == nil { t.Fatalf("Expected error for %q, got none", tt.input) } if !strings.Contains(err.Error(), tt.err) { t.Errorf("Expected error to contain %q for %q, got %q", tt.err, tt.input, err) } } } func TestArrayScanner(t *testing.T) { var s sql.Scanner = Array(&[]bool{}) if _, ok := s.(*BoolArray); !ok { t.Errorf("Expected *BoolArray, got %T", s) } s = Array(&[]float64{}) if _, ok := s.(*Float64Array); !ok { t.Errorf("Expected *Float64Array, got %T", s) } s = Array(&[]int64{}) if _, ok := s.(*Int64Array); !ok { t.Errorf("Expected *Int64Array, got %T", s) } s = Array(&[]float32{}) if _, ok := s.(*Float32Array); !ok { t.Errorf("Expected *Float32Array, got %T", s) } s = Array(&[]int32{}) if _, ok := s.(*Int32Array); !ok { t.Errorf("Expected *Int32Array, got %T", s) } s = Array(&[]string{}) 
if _, ok := s.(*StringArray); !ok { t.Errorf("Expected *StringArray, got %T", s) } s = Array(&[][]byte{}) if _, ok := s.(*ByteaArray); !ok { t.Errorf("Expected *ByteaArray, got %T", s) } for _, tt := range []interface{}{ &[]sql.Scanner{}, &[][]bool{}, &[][]float64{}, &[][]int64{}, &[][]float32{}, &[][]int32{}, &[][]string{}, } { s = Array(tt) if _, ok := s.(GenericArray); !ok { t.Errorf("Expected GenericArray for %T, got %T", tt, s) } } } func TestArrayValuer(t *testing.T) { var v driver.Valuer = Array([]bool{}) if _, ok := v.(*BoolArray); !ok { t.Errorf("Expected *BoolArray, got %T", v) } v = Array([]float64{}) if _, ok := v.(*Float64Array); !ok { t.Errorf("Expected *Float64Array, got %T", v) } v = Array([]int64{}) if _, ok := v.(*Int64Array); !ok { t.Errorf("Expected *Int64Array, got %T", v) } v = Array([]float32{}) if _, ok := v.(*Float32Array); !ok { t.Errorf("Expected *Float32Array, got %T", v) } v = Array([]int32{}) if _, ok := v.(*Int32Array); !ok { t.Errorf("Expected *Int32Array, got %T", v) } v = Array([]string{}) if _, ok := v.(*StringArray); !ok { t.Errorf("Expected *StringArray, got %T", v) } v = Array([][]byte{}) if _, ok := v.(*ByteaArray); !ok { t.Errorf("Expected *ByteaArray, got %T", v) } for _, tt := range []interface{}{ nil, []driver.Value{}, [][]bool{}, [][]float64{}, [][]int64{}, [][]float32{}, [][]int32{}, [][]string{}, } { v = Array(tt) if _, ok := v.(GenericArray); !ok { t.Errorf("Expected GenericArray for %T, got %T", tt, v) } } } func TestBoolArrayScanUnsupported(t *testing.T) { var arr BoolArray err := arr.Scan(1) if err == nil { t.Fatal("Expected error when scanning from int") } if !strings.Contains(err.Error(), "int to BoolArray") { t.Errorf("Expected type to be mentioned when scanning, got %q", err) } } func TestBoolArrayScanEmpty(t *testing.T) { var arr BoolArray err := arr.Scan(`{}`) if err != nil { t.Fatalf("Expected no error, got %v", err) } if arr == nil || len(arr) != 0 { t.Errorf("Expected empty, got %#v", arr) } } func TestBoolArrayScanNil(t *testing.T) { arr := BoolArray{true, true, true} err := arr.Scan(nil) if err != nil { t.Fatalf("Expected no error, got %v", err) } if arr != nil { t.Errorf("Expected nil, got %+v", arr) } } var BoolArrayStringTests = []struct { str string arr BoolArray }{ {`{}`, BoolArray{}}, {`{t}`, BoolArray{true}}, {`{f,t}`, BoolArray{false, true}}, } func TestBoolArrayScanBytes(t *testing.T) { for _, tt := range BoolArrayStringTests { bytes := []byte(tt.str) arr := BoolArray{true, true, true} err := arr.Scan(bytes) if err != nil { t.Fatalf("Expected no error for %q, got %v", bytes, err) } if !reflect.DeepEqual(arr, tt.arr) { t.Errorf("Expected %+v for %q, got %+v", tt.arr, bytes, arr) } } } func BenchmarkBoolArrayScanBytes(b *testing.B) { var a BoolArray var x interface{} = []byte(`{t,f,t,f,t,f,t,f,t,f}`) for i := 0; i < b.N; i++ { a = BoolArray{} a.Scan(x) } } func TestBoolArrayScanString(t *testing.T) { for _, tt := range BoolArrayStringTests { arr := BoolArray{true, true, true} err := arr.Scan(tt.str) if err != nil { t.Fatalf("Expected no error for %q, got %v", tt.str, err) } if !reflect.DeepEqual(arr, tt.arr) { t.Errorf("Expected %+v for %q, got %+v", tt.arr, tt.str, arr) } } } func TestBoolArrayScanError(t *testing.T) { for _, tt := range []struct { input, err string }{ {``, "unable to parse array"}, {`{`, "unable to parse array"}, {`{{t},{f}}`, "cannot convert ARRAY[2][1] to BoolArray"}, {`{NULL}`, `could not parse boolean array index 0: invalid boolean ""`}, {`{a}`, `could not parse boolean array index 0: invalid boolean 
"a"`}, {`{t,b}`, `could not parse boolean array index 1: invalid boolean "b"`}, {`{t,f,cd}`, `could not parse boolean array index 2: invalid boolean "cd"`}, } { arr := BoolArray{true, true, true} err := arr.Scan(tt.input) if err == nil { t.Fatalf("Expected error for %q, got none", tt.input) } if !strings.Contains(err.Error(), tt.err) { t.Errorf("Expected error to contain %q for %q, got %q", tt.err, tt.input, err) } if !reflect.DeepEqual(arr, BoolArray{true, true, true}) { t.Errorf("Expected destination not to change for %q, got %+v", tt.input, arr) } } } func TestBoolArrayValue(t *testing.T) { result, err := BoolArray(nil).Value() if err != nil { t.Fatalf("Expected no error for nil, got %v", err) } if result != nil { t.Errorf("Expected nil, got %q", result) } result, err = BoolArray([]bool{}).Value() if err != nil { t.Fatalf("Expected no error for empty, got %v", err) } if expected := `{}`; !reflect.DeepEqual(result, expected) { t.Errorf("Expected empty, got %q", result) } result, err = BoolArray([]bool{false, true, false}).Value() if err != nil { t.Fatalf("Expected no error, got %v", err) } if expected := `{f,t,f}`; !reflect.DeepEqual(result, expected) { t.Errorf("Expected %q, got %q", expected, result) } } func BenchmarkBoolArrayValue(b *testing.B) { rand.Seed(1) x := make([]bool, 10) for i := 0; i < len(x); i++ { x[i] = rand.Intn(2) == 0 } a := BoolArray(x) for i := 0; i < b.N; i++ { a.Value() } } func TestByteaArrayScanUnsupported(t *testing.T) { var arr ByteaArray err := arr.Scan(1) if err == nil { t.Fatal("Expected error when scanning from int") } if !strings.Contains(err.Error(), "int to ByteaArray") { t.Errorf("Expected type to be mentioned when scanning, got %q", err) } } func TestByteaArrayScanEmpty(t *testing.T) { var arr ByteaArray err := arr.Scan(`{}`) if err != nil { t.Fatalf("Expected no error, got %v", err) } if arr == nil || len(arr) != 0 { t.Errorf("Expected empty, got %#v", arr) } } func TestByteaArrayScanNil(t *testing.T) { arr := ByteaArray{{2}, {6}, {0, 0}} err := arr.Scan(nil) if err != nil { t.Fatalf("Expected no error, got %v", err) } if arr != nil { t.Errorf("Expected nil, got %+v", arr) } } var ByteaArrayStringTests = []struct { str string arr ByteaArray }{ {`{}`, ByteaArray{}}, {`{NULL}`, ByteaArray{nil}}, {`{"\\xfeff"}`, ByteaArray{{'\xFE', '\xFF'}}}, {`{"\\xdead","\\xbeef"}`, ByteaArray{{'\xDE', '\xAD'}, {'\xBE', '\xEF'}}}, } func TestByteaArrayScanBytes(t *testing.T) { for _, tt := range ByteaArrayStringTests { bytes := []byte(tt.str) arr := ByteaArray{{2}, {6}, {0, 0}} err := arr.Scan(bytes) if err != nil { t.Fatalf("Expected no error for %q, got %v", bytes, err) } if !reflect.DeepEqual(arr, tt.arr) { t.Errorf("Expected %+v for %q, got %+v", tt.arr, bytes, arr) } } } func BenchmarkByteaArrayScanBytes(b *testing.B) { var a ByteaArray var x interface{} = []byte(`{"\\xfe","\\xff","\\xdead","\\xbeef","\\xfe","\\xff","\\xdead","\\xbeef","\\xfe","\\xff"}`) for i := 0; i < b.N; i++ { a = ByteaArray{} a.Scan(x) } } func TestByteaArrayScanString(t *testing.T) { for _, tt := range ByteaArrayStringTests { arr := ByteaArray{{2}, {6}, {0, 0}} err := arr.Scan(tt.str) if err != nil { t.Fatalf("Expected no error for %q, got %v", tt.str, err) } if !reflect.DeepEqual(arr, tt.arr) { t.Errorf("Expected %+v for %q, got %+v", tt.arr, tt.str, arr) } } } func TestByteaArrayScanError(t *testing.T) { for _, tt := range []struct { input, err string }{ {``, "unable to parse array"}, {`{`, "unable to parse array"}, {`{{"\\xfeff"},{"\\xbeef"}}`, "cannot convert ARRAY[2][1] to 
ByteaArray"}, {`{"\\abc"}`, "could not parse bytea array index 0: could not parse bytea value"}, } { arr := ByteaArray{{2}, {6}, {0, 0}} err := arr.Scan(tt.input) if err == nil { t.Fatalf("Expected error for %q, got none", tt.input) } if !strings.Contains(err.Error(), tt.err) { t.Errorf("Expected error to contain %q for %q, got %q", tt.err, tt.input, err) } if !reflect.DeepEqual(arr, ByteaArray{{2}, {6}, {0, 0}}) { t.Errorf("Expected destination not to change for %q, got %+v", tt.input, arr) } } } func TestByteaArrayValue(t *testing.T) { result, err := ByteaArray(nil).Value() if err != nil { t.Fatalf("Expected no error for nil, got %v", err) } if result != nil { t.Errorf("Expected nil, got %q", result) } result, err = ByteaArray([][]byte{}).Value() if err != nil { t.Fatalf("Expected no error for empty, got %v", err) } if expected := `{}`; !reflect.DeepEqual(result, expected) { t.Errorf("Expected empty, got %q", result) } result, err = ByteaArray([][]byte{{'\xDE', '\xAD', '\xBE', '\xEF'}, {'\xFE', '\xFF'}, {}}).Value() if err != nil { t.Fatalf("Expected no error, got %v", err) } if expected := `{"\\xdeadbeef","\\xfeff","\\x"}`; !reflect.DeepEqual(result, expected) { t.Errorf("Expected %q, got %q", expected, result) } } func BenchmarkByteaArrayValue(b *testing.B) { rand.Seed(1) x := make([][]byte, 10) for i := 0; i < len(x); i++ { x[i] = make([]byte, len(x)) for j := 0; j < len(x); j++ { x[i][j] = byte(rand.Int()) } } a := ByteaArray(x) for i := 0; i < b.N; i++ { a.Value() } } func TestFloat64ArrayScanUnsupported(t *testing.T) { var arr Float64Array err := arr.Scan(true) if err == nil { t.Fatal("Expected error when scanning from bool") } if !strings.Contains(err.Error(), "bool to Float64Array") { t.Errorf("Expected type to be mentioned when scanning, got %q", err) } } func TestFloat64ArrayScanEmpty(t *testing.T) { var arr Float64Array err := arr.Scan(`{}`) if err != nil { t.Fatalf("Expected no error, got %v", err) } if arr == nil || len(arr) != 0 { t.Errorf("Expected empty, got %#v", arr) } } func TestFloat64ArrayScanNil(t *testing.T) { arr := Float64Array{5, 5, 5} err := arr.Scan(nil) if err != nil { t.Fatalf("Expected no error, got %v", err) } if arr != nil { t.Errorf("Expected nil, got %+v", arr) } } var Float64ArrayStringTests = []struct { str string arr Float64Array }{ {`{}`, Float64Array{}}, {`{1.2}`, Float64Array{1.2}}, {`{3.456,7.89}`, Float64Array{3.456, 7.89}}, {`{3,1,2}`, Float64Array{3, 1, 2}}, } func TestFloat64ArrayScanBytes(t *testing.T) { for _, tt := range Float64ArrayStringTests { bytes := []byte(tt.str) arr := Float64Array{5, 5, 5} err := arr.Scan(bytes) if err != nil { t.Fatalf("Expected no error for %q, got %v", bytes, err) } if !reflect.DeepEqual(arr, tt.arr) { t.Errorf("Expected %+v for %q, got %+v", tt.arr, bytes, arr) } } } func BenchmarkFloat64ArrayScanBytes(b *testing.B) { var a Float64Array var x interface{} = []byte(`{1.2,3.4,5.6,7.8,9.01,2.34,5.67,8.90,1.234,5.678}`) for i := 0; i < b.N; i++ { a = Float64Array{} a.Scan(x) } } func TestFloat64ArrayScanString(t *testing.T) { for _, tt := range Float64ArrayStringTests { arr := Float64Array{5, 5, 5} err := arr.Scan(tt.str) if err != nil { t.Fatalf("Expected no error for %q, got %v", tt.str, err) } if !reflect.DeepEqual(arr, tt.arr) { t.Errorf("Expected %+v for %q, got %+v", tt.arr, tt.str, arr) } } } func TestFloat64ArrayScanError(t *testing.T) { for _, tt := range []struct { input, err string }{ {``, "unable to parse array"}, {`{`, "unable to parse array"}, {`{{5.6},{7.8}}`, "cannot convert ARRAY[2][1] to 
Float64Array"}, {`{NULL}`, "parsing array element index 0:"}, {`{a}`, "parsing array element index 0:"}, {`{5.6,a}`, "parsing array element index 1:"}, {`{5.6,7.8,a}`, "parsing array element index 2:"}, } { arr := Float64Array{5, 5, 5} err := arr.Scan(tt.input) if err == nil { t.Fatalf("Expected error for %q, got none", tt.input) } if !strings.Contains(err.Error(), tt.err) { t.Errorf("Expected error to contain %q for %q, got %q", tt.err, tt.input, err) } if !reflect.DeepEqual(arr, Float64Array{5, 5, 5}) { t.Errorf("Expected destination not to change for %q, got %+v", tt.input, arr) } } } func TestFloat64ArrayValue(t *testing.T) { result, err := Float64Array(nil).Value() if err != nil { t.Fatalf("Expected no error for nil, got %v", err) } if result != nil { t.Errorf("Expected nil, got %q", result) } result, err = Float64Array([]float64{}).Value() if err != nil { t.Fatalf("Expected no error for empty, got %v", err) } if expected := `{}`; !reflect.DeepEqual(result, expected) { t.Errorf("Expected empty, got %q", result) } result, err = Float64Array([]float64{1.2, 3.4, 5.6}).Value() if err != nil { t.Fatalf("Expected no error, got %v", err) } if expected := `{1.2,3.4,5.6}`; !reflect.DeepEqual(result, expected) { t.Errorf("Expected %q, got %q", expected, result) } } func BenchmarkFloat64ArrayValue(b *testing.B) { rand.Seed(1) x := make([]float64, 10) for i := 0; i < len(x); i++ { x[i] = rand.NormFloat64() } a := Float64Array(x) for i := 0; i < b.N; i++ { a.Value() } } func TestInt64ArrayScanUnsupported(t *testing.T) { var arr Int64Array err := arr.Scan(true) if err == nil { t.Fatal("Expected error when scanning from bool") } if !strings.Contains(err.Error(), "bool to Int64Array") { t.Errorf("Expected type to be mentioned when scanning, got %q", err) } } func TestInt64ArrayScanEmpty(t *testing.T) { var arr Int64Array err := arr.Scan(`{}`) if err != nil { t.Fatalf("Expected no error, got %v", err) } if arr == nil || len(arr) != 0 { t.Errorf("Expected empty, got %#v", arr) } } func TestInt64ArrayScanNil(t *testing.T) { arr := Int64Array{5, 5, 5} err := arr.Scan(nil) if err != nil { t.Fatalf("Expected no error, got %v", err) } if arr != nil { t.Errorf("Expected nil, got %+v", arr) } } var Int64ArrayStringTests = []struct { str string arr Int64Array }{ {`{}`, Int64Array{}}, {`{12}`, Int64Array{12}}, {`{345,678}`, Int64Array{345, 678}}, } func TestInt64ArrayScanBytes(t *testing.T) { for _, tt := range Int64ArrayStringTests { bytes := []byte(tt.str) arr := Int64Array{5, 5, 5} err := arr.Scan(bytes) if err != nil { t.Fatalf("Expected no error for %q, got %v", bytes, err) } if !reflect.DeepEqual(arr, tt.arr) { t.Errorf("Expected %+v for %q, got %+v", tt.arr, bytes, arr) } } } func BenchmarkInt64ArrayScanBytes(b *testing.B) { var a Int64Array var x interface{} = []byte(`{1,2,3,4,5,6,7,8,9,0}`) for i := 0; i < b.N; i++ { a = Int64Array{} a.Scan(x) } } func TestInt64ArrayScanString(t *testing.T) { for _, tt := range Int64ArrayStringTests { arr := Int64Array{5, 5, 5} err := arr.Scan(tt.str) if err != nil { t.Fatalf("Expected no error for %q, got %v", tt.str, err) } if !reflect.DeepEqual(arr, tt.arr) { t.Errorf("Expected %+v for %q, got %+v", tt.arr, tt.str, arr) } } } func TestInt64ArrayScanError(t *testing.T) { for _, tt := range []struct { input, err string }{ {``, "unable to parse array"}, {`{`, "unable to parse array"}, {`{{5},{6}}`, "cannot convert ARRAY[2][1] to Int64Array"}, {`{NULL}`, "parsing array element index 0:"}, {`{a}`, "parsing array element index 0:"}, {`{5,a}`, "parsing array element index 
1:"}, {`{5,6,a}`, "parsing array element index 2:"}, } { arr := Int64Array{5, 5, 5} err := arr.Scan(tt.input) if err == nil { t.Fatalf("Expected error for %q, got none", tt.input) } if !strings.Contains(err.Error(), tt.err) { t.Errorf("Expected error to contain %q for %q, got %q", tt.err, tt.input, err) } if !reflect.DeepEqual(arr, Int64Array{5, 5, 5}) { t.Errorf("Expected destination not to change for %q, got %+v", tt.input, arr) } } } func TestInt64ArrayValue(t *testing.T) { result, err := Int64Array(nil).Value() if err != nil { t.Fatalf("Expected no error for nil, got %v", err) } if result != nil { t.Errorf("Expected nil, got %q", result) } result, err = Int64Array([]int64{}).Value() if err != nil { t.Fatalf("Expected no error for empty, got %v", err) } if expected := `{}`; !reflect.DeepEqual(result, expected) { t.Errorf("Expected empty, got %q", result) } result, err = Int64Array([]int64{1, 2, 3}).Value() if err != nil { t.Fatalf("Expected no error, got %v", err) } if expected := `{1,2,3}`; !reflect.DeepEqual(result, expected) { t.Errorf("Expected %q, got %q", expected, result) } } func BenchmarkInt64ArrayValue(b *testing.B) { rand.Seed(1) x := make([]int64, 10) for i := 0; i < len(x); i++ { x[i] = rand.Int63() } a := Int64Array(x) for i := 0; i < b.N; i++ { a.Value() } } func TestFloat32ArrayScanUnsupported(t *testing.T) { var arr Float32Array err := arr.Scan(true) if err == nil { t.Fatal("Expected error when scanning from bool") } if !strings.Contains(err.Error(), "bool to Float32Array") { t.Errorf("Expected type to be mentioned when scanning, got %q", err) } } func TestFloat32ArrayScanEmpty(t *testing.T) { var arr Float32Array err := arr.Scan(`{}`) if err != nil { t.Fatalf("Expected no error, got %v", err) } if arr == nil || len(arr) != 0 { t.Errorf("Expected empty, got %#v", arr) } } func TestFloat32ArrayScanNil(t *testing.T) { arr := Float32Array{5, 5, 5} err := arr.Scan(nil) if err != nil { t.Fatalf("Expected no error, got %v", err) } if arr != nil { t.Errorf("Expected nil, got %+v", arr) } } var Float32ArrayStringTests = []struct { str string arr Float32Array }{ {`{}`, Float32Array{}}, {`{1.2}`, Float32Array{1.2}}, {`{3.456,7.89}`, Float32Array{3.456, 7.89}}, {`{3,1,2}`, Float32Array{3, 1, 2}}, } func TestFloat32ArrayScanBytes(t *testing.T) { for _, tt := range Float32ArrayStringTests { bytes := []byte(tt.str) arr := Float32Array{5, 5, 5} err := arr.Scan(bytes) if err != nil { t.Fatalf("Expected no error for %q, got %v", bytes, err) } if !reflect.DeepEqual(arr, tt.arr) { t.Errorf("Expected %+v for %q, got %+v", tt.arr, bytes, arr) } } } func BenchmarkFloat32ArrayScanBytes(b *testing.B) { var a Float32Array var x interface{} = []byte(`{1.2,3.4,5.6,7.8,9.01,2.34,5.67,8.90,1.234,5.678}`) for i := 0; i < b.N; i++ { a = Float32Array{} a.Scan(x) } } func TestFloat32ArrayScanString(t *testing.T) { for _, tt := range Float32ArrayStringTests { arr := Float32Array{5, 5, 5} err := arr.Scan(tt.str) if err != nil { t.Fatalf("Expected no error for %q, got %v", tt.str, err) } if !reflect.DeepEqual(arr, tt.arr) { t.Errorf("Expected %+v for %q, got %+v", tt.arr, tt.str, arr) } } } func TestFloat32ArrayScanError(t *testing.T) { for _, tt := range []struct { input, err string }{ {``, "unable to parse array"}, {`{`, "unable to parse array"}, {`{{5.6},{7.8}}`, "cannot convert ARRAY[2][1] to Float32Array"}, {`{NULL}`, "parsing array element index 0:"}, {`{a}`, "parsing array element index 0:"}, {`{5.6,a}`, "parsing array element index 1:"}, {`{5.6,7.8,a}`, "parsing array element index 2:"}, } { arr 
:= Float32Array{5, 5, 5} err := arr.Scan(tt.input) if err == nil { t.Fatalf("Expected error for %q, got none", tt.input) } if !strings.Contains(err.Error(), tt.err) { t.Errorf("Expected error to contain %q for %q, got %q", tt.err, tt.input, err) } if !reflect.DeepEqual(arr, Float32Array{5, 5, 5}) { t.Errorf("Expected destination not to change for %q, got %+v", tt.input, arr) } } } func TestFloat32ArrayValue(t *testing.T) { result, err := Float32Array(nil).Value() if err != nil { t.Fatalf("Expected no error for nil, got %v", err) } if result != nil { t.Errorf("Expected nil, got %q", result) } result, err = Float32Array([]float32{}).Value() if err != nil { t.Fatalf("Expected no error for empty, got %v", err) } if expected := `{}`; !reflect.DeepEqual(result, expected) { t.Errorf("Expected empty, got %q", result) } result, err = Float32Array([]float32{1.2, 3.4, 5.6}).Value() if err != nil { t.Fatalf("Expected no error, got %v", err) } if expected := `{1.2,3.4,5.6}`; !reflect.DeepEqual(result, expected) { t.Errorf("Expected %q, got %q", expected, result) } } func BenchmarkFloat32ArrayValue(b *testing.B) { rand.Seed(1) x := make([]float32, 10) for i := 0; i < len(x); i++ { x[i] = rand.Float32() } a := Float32Array(x) for i := 0; i < b.N; i++ { a.Value() } } func TestInt32ArrayScanUnsupported(t *testing.T) { var arr Int32Array err := arr.Scan(true) if err == nil { t.Fatal("Expected error when scanning from bool") } if !strings.Contains(err.Error(), "bool to Int32Array") { t.Errorf("Expected type to be mentioned when scanning, got %q", err) } } func TestInt32ArrayScanEmpty(t *testing.T) { var arr Int32Array err := arr.Scan(`{}`) if err != nil { t.Fatalf("Expected no error, got %v", err) } if arr == nil || len(arr) != 0 { t.Errorf("Expected empty, got %#v", arr) } } func TestInt32ArrayScanNil(t *testing.T) { arr := Int32Array{5, 5, 5} err := arr.Scan(nil) if err != nil { t.Fatalf("Expected no error, got %v", err) } if arr != nil { t.Errorf("Expected nil, got %+v", arr) } } var Int32ArrayStringTests = []struct { str string arr Int32Array }{ {`{}`, Int32Array{}}, {`{12}`, Int32Array{12}}, {`{345,678}`, Int32Array{345, 678}}, } func TestInt32ArrayScanBytes(t *testing.T) { for _, tt := range Int32ArrayStringTests { bytes := []byte(tt.str) arr := Int32Array{5, 5, 5} err := arr.Scan(bytes) if err != nil { t.Fatalf("Expected no error for %q, got %v", bytes, err) } if !reflect.DeepEqual(arr, tt.arr) { t.Errorf("Expected %+v for %q, got %+v", tt.arr, bytes, arr) } } } func BenchmarkInt32ArrayScanBytes(b *testing.B) { var a Int32Array var x interface{} = []byte(`{1,2,3,4,5,6,7,8,9,0}`) for i := 0; i < b.N; i++ { a = Int32Array{} a.Scan(x) } } func TestInt32ArrayScanString(t *testing.T) { for _, tt := range Int32ArrayStringTests { arr := Int32Array{5, 5, 5} err := arr.Scan(tt.str) if err != nil { t.Fatalf("Expected no error for %q, got %v", tt.str, err) } if !reflect.DeepEqual(arr, tt.arr) { t.Errorf("Expected %+v for %q, got %+v", tt.arr, tt.str, arr) } } } func TestInt32ArrayScanError(t *testing.T) { for _, tt := range []struct { input, err string }{ {``, "unable to parse array"}, {`{`, "unable to parse array"}, {`{{5},{6}}`, "cannot convert ARRAY[2][1] to Int32Array"}, {`{NULL}`, "parsing array element index 0:"}, {`{a}`, "parsing array element index 0:"}, {`{5,a}`, "parsing array element index 1:"}, {`{5,6,a}`, "parsing array element index 2:"}, } { arr := Int32Array{5, 5, 5} err := arr.Scan(tt.input) if err == nil { t.Fatalf("Expected error for %q, got none", tt.input) } if !strings.Contains(err.Error(), 
tt.err) { t.Errorf("Expected error to contain %q for %q, got %q", tt.err, tt.input, err) } if !reflect.DeepEqual(arr, Int32Array{5, 5, 5}) { t.Errorf("Expected destination not to change for %q, got %+v", tt.input, arr) } } } func TestInt32ArrayValue(t *testing.T) { result, err := Int32Array(nil).Value() if err != nil { t.Fatalf("Expected no error for nil, got %v", err) } if result != nil { t.Errorf("Expected nil, got %q", result) } result, err = Int32Array([]int32{}).Value() if err != nil { t.Fatalf("Expected no error for empty, got %v", err) } if expected := `{}`; !reflect.DeepEqual(result, expected) { t.Errorf("Expected empty, got %q", result) } result, err = Int32Array([]int32{1, 2, 3}).Value() if err != nil { t.Fatalf("Expected no error, got %v", err) } if expected := `{1,2,3}`; !reflect.DeepEqual(result, expected) { t.Errorf("Expected %q, got %q", expected, result) } } func BenchmarkInt32ArrayValue(b *testing.B) { rand.Seed(1) x := make([]int32, 10) for i := 0; i < len(x); i++ { x[i] = rand.Int31() } a := Int32Array(x) for i := 0; i < b.N; i++ { a.Value() } } func TestStringArrayScanUnsupported(t *testing.T) { var arr StringArray err := arr.Scan(true) if err == nil { t.Fatal("Expected error when scanning from bool") } if !strings.Contains(err.Error(), "bool to StringArray") { t.Errorf("Expected type to be mentioned when scanning, got %q", err) } } func TestStringArrayScanEmpty(t *testing.T) { var arr StringArray err := arr.Scan(`{}`) if err != nil { t.Fatalf("Expected no error, got %v", err) } if arr == nil || len(arr) != 0 { t.Errorf("Expected empty, got %#v", arr) } } func TestStringArrayScanNil(t *testing.T) { arr := StringArray{"x", "x", "x"} err := arr.Scan(nil) if err != nil { t.Fatalf("Expected no error, got %v", err) } if arr != nil { t.Errorf("Expected nil, got %+v", arr) } } var StringArrayStringTests = []struct { str string arr StringArray }{ {`{}`, StringArray{}}, {`{t}`, StringArray{"t"}}, {`{f,1}`, StringArray{"f", "1"}}, {`{"a\\b","c d",","}`, StringArray{"a\\b", "c d", ","}}, } func TestStringArrayScanBytes(t *testing.T) { for _, tt := range StringArrayStringTests { bytes := []byte(tt.str) arr := StringArray{"x", "x", "x"} err := arr.Scan(bytes) if err != nil { t.Fatalf("Expected no error for %q, got %v", bytes, err) } if !reflect.DeepEqual(arr, tt.arr) { t.Errorf("Expected %+v for %q, got %+v", tt.arr, bytes, arr) } } } func BenchmarkStringArrayScanBytes(b *testing.B) { var a StringArray var x interface{} = []byte(`{a,b,c,d,e,f,g,h,i,j}`) var y interface{} = []byte(`{"\a","\b","\c","\d","\e","\f","\g","\h","\i","\j"}`) for i := 0; i < b.N; i++ { a = StringArray{} a.Scan(x) a = StringArray{} a.Scan(y) } } func TestStringArrayScanString(t *testing.T) { for _, tt := range StringArrayStringTests { arr := StringArray{"x", "x", "x"} err := arr.Scan(tt.str) if err != nil { t.Fatalf("Expected no error for %q, got %v", tt.str, err) } if !reflect.DeepEqual(arr, tt.arr) { t.Errorf("Expected %+v for %q, got %+v", tt.arr, tt.str, arr) } } } func TestStringArrayScanError(t *testing.T) { for _, tt := range []struct { input, err string }{ {``, "unable to parse array"}, {`{`, "unable to parse array"}, {`{{a},{b}}`, "cannot convert ARRAY[2][1] to StringArray"}, {`{NULL}`, "parsing array element index 0: cannot convert nil to string"}, {`{a,NULL}`, "parsing array element index 1: cannot convert nil to string"}, {`{a,b,NULL}`, "parsing array element index 2: cannot convert nil to string"}, } { arr := StringArray{"x", "x", "x"} err := arr.Scan(tt.input) if err == nil { t.Fatalf("Expected 
error for %q, got none", tt.input) } if !strings.Contains(err.Error(), tt.err) { t.Errorf("Expected error to contain %q for %q, got %q", tt.err, tt.input, err) } if !reflect.DeepEqual(arr, StringArray{"x", "x", "x"}) { t.Errorf("Expected destination not to change for %q, got %+v", tt.input, arr) } } } func TestStringArrayValue(t *testing.T) { result, err := StringArray(nil).Value() if err != nil { t.Fatalf("Expected no error for nil, got %v", err) } if result != nil { t.Errorf("Expected nil, got %q", result) } result, err = StringArray([]string{}).Value() if err != nil { t.Fatalf("Expected no error for empty, got %v", err) } if expected := `{}`; !reflect.DeepEqual(result, expected) { t.Errorf("Expected empty, got %q", result) } result, err = StringArray([]string{`a`, `\b`, `c"`, `d,e`}).Value() if err != nil { t.Fatalf("Expected no error, got %v", err) } if expected := `{"a","\\b","c\"","d,e"}`; !reflect.DeepEqual(result, expected) { t.Errorf("Expected %q, got %q", expected, result) } } func BenchmarkStringArrayValue(b *testing.B) { x := make([]string, 10) for i := 0; i < len(x); i++ { x[i] = strings.Repeat(`abc"def\ghi`, 5) } a := StringArray(x) for i := 0; i < b.N; i++ { a.Value() } } func TestGenericArrayScanUnsupported(t *testing.T) { var s string var ss []string var nsa [1]sql.NullString for _, tt := range []struct { src, dest interface{} err string }{ {nil, nil, "destination is not a pointer to array or slice"}, {nil, true, "destination bool is not a pointer to array or slice"}, {nil, &s, "destination *string is not a pointer to array or slice"}, {nil, ss, "destination []string is not a pointer to array or slice"}, {nil, &nsa, " to [1]sql.NullString"}, {true, &ss, "bool to []string"}, {`{{x}}`, &ss, "multidimensional ARRAY[1][1] is not implemented"}, {`{{x},{x}}`, &ss, "multidimensional ARRAY[2][1] is not implemented"}, {`{x}`, &ss, "scanning to string is not implemented"}, } { err := GenericArray{tt.dest}.Scan(tt.src) if err == nil { t.Fatalf("Expected error for [%#v %#v]", tt.src, tt.dest) } if !strings.Contains(err.Error(), tt.err) { t.Errorf("Expected error to contain %q for [%#v %#v], got %q", tt.err, tt.src, tt.dest, err) } } } func TestGenericArrayScanScannerArrayBytes(t *testing.T) { src, expected, nsa := []byte(`{NULL,abc,"\""}`), [3]sql.NullString{{}, {String: `abc`, Valid: true}, {String: `"`, Valid: true}}, [3]sql.NullString{{String: ``, Valid: true}, {}, {}} if err := (GenericArray{&nsa}).Scan(src); err != nil { t.Fatalf("Expected no error, got %v", err) } if !reflect.DeepEqual(nsa, expected) { t.Errorf("Expected %v, got %v", expected, nsa) } } func TestGenericArrayScanScannerArrayString(t *testing.T) { src, expected, nsa := `{NULL,"\"",xyz}`, [3]sql.NullString{{}, {String: `"`, Valid: true}, {String: `xyz`, Valid: true}}, [3]sql.NullString{{String: ``, Valid: true}, {}, {}} if err := (GenericArray{&nsa}).Scan(src); err != nil { t.Fatalf("Expected no error, got %v", err) } if !reflect.DeepEqual(nsa, expected) { t.Errorf("Expected %v, got %v", expected, nsa) } } func TestGenericArrayScanScannerSliceEmpty(t *testing.T) { var nss []sql.NullString if err := (GenericArray{&nss}).Scan(`{}`); err != nil { t.Fatalf("Expected no error, got %v", err) } if nss == nil || len(nss) != 0 { t.Errorf("Expected empty, got %#v", nss) } } func TestGenericArrayScanScannerSliceNil(t *testing.T) { nss := []sql.NullString{{String: ``, Valid: true}, {}} if err := (GenericArray{&nss}).Scan(nil); err != nil { t.Fatalf("Expected no error, got %v", err) } if nss != nil { t.Errorf("Expected nil, got 
%+v", nss) } } func TestGenericArrayScanScannerSliceBytes(t *testing.T) { src, expected, nss := []byte(`{NULL,abc,"\""}`), []sql.NullString{{}, {String: `abc`, Valid: true}, {String: `"`, Valid: true}}, []sql.NullString{{String: ``, Valid: true}, {}, {}, {}, {}} if err := (GenericArray{&nss}).Scan(src); err != nil { t.Fatalf("Expected no error, got %v", err) } if !reflect.DeepEqual(nss, expected) { t.Errorf("Expected %v, got %v", expected, nss) } } func BenchmarkGenericArrayScanScannerSliceBytes(b *testing.B) { var a GenericArray var x interface{} = []byte(`{a,b,c,d,e,f,g,h,i,j}`) var y interface{} = []byte(`{"\a","\b","\c","\d","\e","\f","\g","\h","\i","\j"}`) for i := 0; i < b.N; i++ { a = GenericArray{new([]sql.NullString)} a.Scan(x) a = GenericArray{new([]sql.NullString)} a.Scan(y) } } func TestGenericArrayScanScannerSliceString(t *testing.T) { src, expected, nss := `{NULL,"\"",xyz}`, []sql.NullString{{}, {String: `"`, Valid: true}, {String: `xyz`, Valid: true}}, []sql.NullString{{String: ``, Valid: true}, {}, {}} if err := (GenericArray{&nss}).Scan(src); err != nil { t.Fatalf("Expected no error, got %v", err) } if !reflect.DeepEqual(nss, expected) { t.Errorf("Expected %v, got %v", expected, nss) } } type TildeNullInt64 struct{ sql.NullInt64 } func (TildeNullInt64) ArrayDelimiter() string { return "~" } func TestGenericArrayScanDelimiter(t *testing.T) { src, expected, tnis := `{12~NULL~76}`, []TildeNullInt64{{sql.NullInt64{Int64: 12, Valid: true}}, {}, {sql.NullInt64{Int64: 76, Valid: true}}}, []TildeNullInt64{{sql.NullInt64{Int64: 0, Valid: true}}, {}} if err := (GenericArray{&tnis}).Scan(src); err != nil { t.Fatalf("Expected no error for %#v, got %v", src, err) } if !reflect.DeepEqual(tnis, expected) { t.Errorf("Expected %v for %#v, got %v", expected, src, tnis) } } func TestGenericArrayScanErrors(t *testing.T) { var sa [1]string var nis []sql.NullInt64 var pss *[]string for _, tt := range []struct { src, dest interface{} err string }{ {nil, pss, "destination *[]string is nil"}, {`{`, &sa, "unable to parse"}, {`{}`, &sa, "cannot convert ARRAY[0] to [1]string"}, {`{x,x}`, &sa, "cannot convert ARRAY[2] to [1]string"}, {`{x}`, &nis, `parsing array element index 0: converting`}, } { err := GenericArray{tt.dest}.Scan(tt.src) if err == nil { t.Fatalf("Expected error for [%#v %#v]", tt.src, tt.dest) } if !strings.Contains(err.Error(), tt.err) { t.Errorf("Expected error to contain %q for [%#v %#v], got %q", tt.err, tt.src, tt.dest, err) } } } func TestGenericArrayValueUnsupported(t *testing.T) { _, err := GenericArray{true}.Value() if err == nil { t.Fatal("Expected error for bool") } if !strings.Contains(err.Error(), "bool to array") { t.Errorf("Expected type to be mentioned, got %q", err) } } type ByteArrayValuer [1]byte type ByteSliceValuer []byte type FuncArrayValuer struct { delimiter func() string value func() (driver.Value, error) } func (a ByteArrayValuer) Value() (driver.Value, error) { return a[:], nil } func (b ByteSliceValuer) Value() (driver.Value, error) { return []byte(b), nil } func (f FuncArrayValuer) ArrayDelimiter() string { return f.delimiter() } func (f FuncArrayValuer) Value() (driver.Value, error) { return f.value() } func TestGenericArrayValue(t *testing.T) { result, err := GenericArray{nil}.Value() if err != nil { t.Fatalf("Expected no error for nil, got %v", err) } if result != nil { t.Errorf("Expected nil, got %q", result) } for _, tt := range []interface{}{ []bool(nil), [][]int(nil), []*int(nil), []sql.NullString(nil), } { result, err := GenericArray{tt}.Value() if 
err != nil { t.Fatalf("Expected no error for %#v, got %v", tt, err) } if result != nil { t.Errorf("Expected nil for %#v, got %q", tt, result) } } Tilde := func(v driver.Value) FuncArrayValuer { return FuncArrayValuer{ func() string { return "~" }, func() (driver.Value, error) { return v, nil }} } for _, tt := range []struct { result string input interface{} }{ {`{}`, []bool{}}, {`{true}`, []bool{true}}, {`{true,false}`, []bool{true, false}}, {`{true,false}`, [2]bool{true, false}}, {`{}`, [][]int{{}}}, {`{}`, [][]int{{}, {}}}, {`{{1}}`, [][]int{{1}}}, {`{{1},{2}}`, [][]int{{1}, {2}}}, {`{{1,2},{3,4}}`, [][]int{{1, 2}, {3, 4}}}, {`{{1,2},{3,4}}`, [2][2]int{{1, 2}, {3, 4}}}, {`{"a","\\b","c\"","d,e"}`, []string{`a`, `\b`, `c"`, `d,e`}}, {`{"a","\\b","c\"","d,e"}`, [][]byte{{'a'}, {'\\', 'b'}, {'c', '"'}, {'d', ',', 'e'}}}, {`{NULL}`, []*int{nil}}, {`{0,NULL}`, []*int{new(int), nil}}, {`{NULL}`, []sql.NullString{{}}}, {`{"\"",NULL}`, []sql.NullString{{String: `"`, Valid: true}, {}}}, {`{"a","b"}`, []ByteArrayValuer{{'a'}, {'b'}}}, {`{{"a","b"},{"c","d"}}`, [][]ByteArrayValuer{{{'a'}, {'b'}}, {{'c'}, {'d'}}}}, {`{"e","f"}`, []ByteSliceValuer{{'e'}, {'f'}}}, {`{{"e","f"},{"g","h"}}`, [][]ByteSliceValuer{{{'e'}, {'f'}}, {{'g'}, {'h'}}}}, {`{1~2}`, []FuncArrayValuer{Tilde(int64(1)), Tilde(int64(2))}}, {`{{1~2}~{3~4}}`, [][]FuncArrayValuer{{Tilde(int64(1)), Tilde(int64(2))}, {Tilde(int64(3)), Tilde(int64(4))}}}, } { result, err := GenericArray{tt.input}.Value() if err != nil { t.Fatalf("Expected no error for %q, got %v", tt.input, err) } if !reflect.DeepEqual(result, tt.result) { t.Errorf("Expected %q for %q, got %q", tt.result, tt.input, result) } } } func TestGenericArrayValueErrors(t *testing.T) { v := []interface{}{func() {}} if _, err := (GenericArray{v}).Value(); err == nil { t.Errorf("Expected error for %q, got nil", v) } v = []interface{}{nil, func() {}} if _, err := (GenericArray{v}).Value(); err == nil { t.Errorf("Expected error for %q, got nil", v) } } func BenchmarkGenericArrayValueBools(b *testing.B) { rand.Seed(1) x := make([]bool, 10) for i := 0; i < len(x); i++ { x[i] = rand.Intn(2) == 0 } a := GenericArray{x} for i := 0; i < b.N; i++ { a.Value() } } func BenchmarkGenericArrayValueFloat64s(b *testing.B) { rand.Seed(1) x := make([]float64, 10) for i := 0; i < len(x); i++ { x[i] = rand.NormFloat64() } a := GenericArray{x} for i := 0; i < b.N; i++ { a.Value() } } func BenchmarkGenericArrayValueInt64s(b *testing.B) { rand.Seed(1) x := make([]int64, 10) for i := 0; i < len(x); i++ { x[i] = rand.Int63() } a := GenericArray{x} for i := 0; i < b.N; i++ { a.Value() } } func BenchmarkGenericArrayValueByteSlices(b *testing.B) { x := make([][]byte, 10) for i := 0; i < len(x); i++ { x[i] = bytes.Repeat([]byte(`abc"def\ghi`), 5) } a := GenericArray{x} for i := 0; i < b.N; i++ { a.Value() } } func BenchmarkGenericArrayValueStrings(b *testing.B) { x := make([]string, 10) for i := 0; i < len(x); i++ { x[i] = strings.Repeat(`abc"def\ghi`, 5) } a := GenericArray{x} for i := 0; i < b.N; i++ { a.Value() } } func TestArrayScanBackend(t *testing.T) { db := openTestConn(t) defer db.Close() for _, tt := range []struct { s string d sql.Scanner e interface{} }{ {`ARRAY[true, false]`, new(BoolArray), &BoolArray{true, false}}, {`ARRAY[E'\\xdead', E'\\xbeef']`, new(ByteaArray), &ByteaArray{{'\xDE', '\xAD'}, {'\xBE', '\xEF'}}}, {`ARRAY[1.2, 3.4]`, new(Float64Array), &Float64Array{1.2, 3.4}}, {`ARRAY[1, 2, 3]`, new(Int64Array), &Int64Array{1, 2, 3}}, {`ARRAY['a', E'\\b', 'c"', 'd,e']`, new(StringArray), 
&StringArray{`a`, `\b`, `c"`, `d,e`}}, } { err := db.QueryRow(`SELECT ` + tt.s).Scan(tt.d) if err != nil { t.Errorf("Expected no error when scanning %s into %T, got %v", tt.s, tt.d, err) } if !reflect.DeepEqual(tt.d, tt.e) { t.Errorf("Expected %v when scanning %s into %T, got %v", tt.e, tt.s, tt.d, tt.d) } } } func TestArrayValueBackend(t *testing.T) { db := openTestConn(t) defer db.Close() for _, tt := range []struct { s string v driver.Valuer }{ {`ARRAY[true, false]`, BoolArray{true, false}}, {`ARRAY[E'\\xdead', E'\\xbeef']`, ByteaArray{{'\xDE', '\xAD'}, {'\xBE', '\xEF'}}}, {`ARRAY[1.2, 3.4]`, Float64Array{1.2, 3.4}}, {`ARRAY[1, 2, 3]`, Int64Array{1, 2, 3}}, {`ARRAY['a', E'\\b', 'c"', 'd,e']`, StringArray{`a`, `\b`, `c"`, `d,e`}}, } { var x int err := db.QueryRow(`SELECT 1 WHERE `+tt.s+` <> $1`, tt.v).Scan(&x) if err != sql.ErrNoRows { t.Errorf("Expected %v to equal %s, got %v", tt.v, tt.s, err) } } } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/buf.go0000644000000000000000000000312615024302467021034 0ustar rootrootpackage pq import ( "bytes" "encoding/binary" "github.com/lib/pq/oid" ) type readBuf []byte func (b *readBuf) int32() (n int) { n = int(int32(binary.BigEndian.Uint32(*b))) *b = (*b)[4:] return } func (b *readBuf) oid() (n oid.Oid) { n = oid.Oid(binary.BigEndian.Uint32(*b)) *b = (*b)[4:] return } // N.B: this is actually an unsigned 16-bit integer, unlike int32 func (b *readBuf) int16() (n int) { n = int(binary.BigEndian.Uint16(*b)) *b = (*b)[2:] return } func (b *readBuf) string() string { i := bytes.IndexByte(*b, 0) if i < 0 { errorf("invalid message format; expected string terminator") } s := (*b)[:i] *b = (*b)[i+1:] return string(s) } func (b *readBuf) next(n int) (v []byte) { v = (*b)[:n] *b = (*b)[n:] return } func (b *readBuf) byte() byte { return b.next(1)[0] } type writeBuf struct { buf []byte pos int } func (b *writeBuf) int32(n int) { x := make([]byte, 4) binary.BigEndian.PutUint32(x, uint32(n)) b.buf = append(b.buf, x...) } func (b *writeBuf) int16(n int) { x := make([]byte, 2) binary.BigEndian.PutUint16(x, uint16(n)) b.buf = append(b.buf, x...) } func (b *writeBuf) string(s string) { b.buf = append(append(b.buf, s...), '\000') } func (b *writeBuf) byte(c byte) { b.buf = append(b.buf, c) } func (b *writeBuf) bytes(v []byte) { b.buf = append(b.buf, v...) 
} func (b *writeBuf) wrap() []byte { p := b.buf[b.pos:] binary.BigEndian.PutUint32(p, uint32(len(p))) return b.buf } func (b *writeBuf) next(c byte) { p := b.buf[b.pos:] binary.BigEndian.PutUint32(p, uint32(len(p))) b.pos = len(b.buf) + 1 b.buf = append(b.buf, c, 0, 0, 0, 0) } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/conn_go18.go0000644000000000000000000001320715024302467022054 0ustar rootrootpackage pq import ( "context" "database/sql" "database/sql/driver" "fmt" "io" "io/ioutil" "time" ) const ( watchCancelDialContextTimeout = time.Second * 10 ) // Implement the "QueryerContext" interface func (cn *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { list := make([]driver.Value, len(args)) for i, nv := range args { list[i] = nv.Value } finish := cn.watchCancel(ctx) r, err := cn.query(query, list) if err != nil { if finish != nil { finish() } return nil, err } r.finish = finish return r, nil } // Implement the "ExecerContext" interface func (cn *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { list := make([]driver.Value, len(args)) for i, nv := range args { list[i] = nv.Value } if finish := cn.watchCancel(ctx); finish != nil { defer finish() } return cn.Exec(query, list) } // Implement the "ConnPrepareContext" interface func (cn *conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { if finish := cn.watchCancel(ctx); finish != nil { defer finish() } return cn.Prepare(query) } // Implement the "ConnBeginTx" interface func (cn *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { var mode string switch sql.IsolationLevel(opts.Isolation) { case sql.LevelDefault: // Don't touch mode: use the server's default case sql.LevelReadUncommitted: mode = " ISOLATION LEVEL READ UNCOMMITTED" case sql.LevelReadCommitted: mode = " ISOLATION LEVEL READ COMMITTED" case sql.LevelRepeatableRead: mode = " ISOLATION LEVEL REPEATABLE READ" case sql.LevelSerializable: mode = " ISOLATION LEVEL SERIALIZABLE" default: return nil, fmt.Errorf("pq: isolation level not supported: %d", opts.Isolation) } if opts.ReadOnly { mode += " READ ONLY" } else { mode += " READ WRITE" } tx, err := cn.begin(mode) if err != nil { return nil, err } cn.txnFinish = cn.watchCancel(ctx) return tx, nil } func (cn *conn) Ping(ctx context.Context) error { if finish := cn.watchCancel(ctx); finish != nil { defer finish() } rows, err := cn.simpleQuery(";") if err != nil { return driver.ErrBadConn // https://golang.org/pkg/database/sql/driver/#Pinger } rows.Close() return nil } func (cn *conn) watchCancel(ctx context.Context) func() { if done := ctx.Done(); done != nil { finished := make(chan struct{}, 1) go func() { select { case <-done: select { case finished <- struct{}{}: default: // We raced with the finish func, let the next query handle this with the // context. return } // Set the connection state to bad so it does not get reused. cn.err.set(ctx.Err()) // At this point the function level context is canceled, // so it must not be used for the additional network // request to cancel the query. // Create a new context to pass into the dial. 
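// watchCancelDialContextTimeout (10 seconds) bounds how long this
// best-effort cancel request may spend dialing a second connection to the
// server.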
ctxCancel, cancel := context.WithTimeout(context.Background(), watchCancelDialContextTimeout) defer cancel() _ = cn.cancel(ctxCancel) case <-finished: } }() return func() { select { case <-finished: cn.err.set(ctx.Err()) cn.Close() case finished <- struct{}{}: } } } return nil } func (cn *conn) cancel(ctx context.Context) error { // Create a new values map (copy). This makes sure the connection created // in this method cannot write to the same underlying data, which could // cause a concurrent map write panic. This is necessary because cancel // is called from a goroutine in watchCancel. o := make(values) for k, v := range cn.opts { o[k] = v } c, err := dial(ctx, cn.dialer, o) if err != nil { return err } defer c.Close() { can := conn{ c: c, } err = can.ssl(o) if err != nil { return err } w := can.writeBuf(0) w.int32(80877102) // cancel request code w.int32(cn.processID) w.int32(cn.secretKey) if err := can.sendStartupPacket(w); err != nil { return err } } // Read until EOF to ensure that the server received the cancel. { _, err := io.Copy(ioutil.Discard, c) return err } } // Implement the "StmtQueryContext" interface func (st *stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { list := make([]driver.Value, len(args)) for i, nv := range args { list[i] = nv.Value } finish := st.watchCancel(ctx) r, err := st.query(list) if err != nil { if finish != nil { finish() } return nil, err } r.finish = finish return r, nil } // Implement the "StmtExecContext" interface func (st *stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { list := make([]driver.Value, len(args)) for i, nv := range args { list[i] = nv.Value } if finish := st.watchCancel(ctx); finish != nil { defer finish() } return st.Exec(list) } // watchCancel is implemented on stmt in order to not mark the parent conn as bad func (st *stmt) watchCancel(ctx context.Context) func() { if done := ctx.Done(); done != nil { finished := make(chan struct{}) go func() { select { case <-done: // At this point the function level context is canceled, // so it must not be used for the additional network // request to cancel the query. // Create a new context to pass into the dial. 
ctxCancel, cancel := context.WithTimeout(context.Background(), watchCancelDialContextTimeout) defer cancel() _ = st.cancel(ctxCancel) finished <- struct{}{} case <-finished: } }() return func() { select { case <-finished: case finished <- struct{}{}: } } } return nil } func (st *stmt) cancel(ctx context.Context) error { return st.cn.cancel(ctx) } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/certs/0000755000000000000000000000000015024302467021047 5ustar rootrootdependencies/pkg/mod/github.com/lib/pq@v1.10.9/certs/server.crt0000644000000000000000000000246615024302467023077 0ustar rootroot-----BEGIN CERTIFICATE----- MIIDqzCCApOgAwIBAgIJAPiewLrOyYipMA0GCSqGSIb3DQEBCwUAMF4xCzAJBgNV BAYTAlVTMQ8wDQYDVQQIDAZOZXZhZGExEjAQBgNVBAcMCUxhcyBWZWdhczEaMBgG A1UECgwRZ2l0aHViLmNvbS9saWIvcHExDjAMBgNVBAMMBXBxIENBMB4XDTIxMDkw MjAxNTUwMloXDTMxMDkwMzAxNTUwMlowTjELMAkGA1UEBhMCVVMxDzANBgNVBAgM Bk5ldmFkYTESMBAGA1UEBwwJTGFzIFZlZ2FzMRowGAYDVQQKDBFnaXRodWIuY29t L2xpYi9wcTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKf6H4UzmANN QiQJe92Mf3ETMYmpZKNNO9DPEHyNLIkag+XwMrBTdcCK0mLvsNCYpXuBN6703KCd WAFOeMmj7gOsWtvjt5Xm6bRHLgegekXzcG/jDwq/wyzeDzr/YkITuIlG44Lf9lhY FLwiHlHOWHnwrZaEh6aU//02aQkzyX5INeXl/3TZm2G2eIH6AOxOKOU27MUsyVSQ 5DE+SDKGcRP4bElueeQWvxAXNMZYb7sVSDdfHI3zr32K4k/tC8x0fZJ5XN/dvl4t 4N4MrYlmDO5XOrb/gQH1H4iu6+5EMDfZYab4fkThnNFdfFqu4/8Scv7KZ8mWqpKM fGAjEPctQi0CAwEAAaN8MHowHQYDVR0OBBYEFENExPbmDyFB2AJUdbMvVyhlNPD5 MAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMBMGA1UdEQQMMAqCCHBvc3RncmVzMCwG CWCGSAGG+EIBDQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTANBgkq hkiG9w0BAQsFAAOCAQEAMRVbV8RiEsmp9HAtnVCZmRXMIbgPGrqjeSwk586s4K8v BSqNCqxv6s5GfCRmDYiqSqeuCVDtUJS1HsTmbxVV7Ke71WMo+xHR1ICGKOa8WGCb TGsuicG5QZXWaxeMOg4s0qpKmKko0d1aErdVsanU5dkrVS7D6729Ffnzu4lwApk6 invAB67p8u7sojwqRq5ce0vRaG+YFylTrWomF9kauEb8gKbQ9Xc7QfX+h+UH/mq9 Nvdj8LOHp6/82bZdnsYUOtV4lS1IA/qzeXpqBphxqfWabD1yLtkyJyImZKq8uIPp 0CG4jhObPdWcCkXD6bg3QK3mhwlC79OtFgxWmldCRQ== -----END CERTIFICATE----- dependencies/pkg/mod/github.com/lib/pq@v1.10.9/certs/postgresql.cnf0000644000000000000000000000026415024302467023744 0ustar rootroot[req] distinguished_name = req_distinguished_name prompt = no [req_distinguished_name] C = US ST = Nevada L = Las Vegas O = github.com/lib/pq CN = pqgosslcert dependencies/pkg/mod/github.com/lib/pq@v1.10.9/certs/bogus_root.crt0000644000000000000000000000212715024302467023745 0ustar rootroot-----BEGIN CERTIFICATE----- MIIDBjCCAe6gAwIBAgIQSnDYp/Naet9HOZljF5PuwDANBgkqhkiG9w0BAQsFADAr MRIwEAYDVQQKEwlDb2Nrcm9hY2gxFTATBgNVBAMTDENvY2tyb2FjaCBDQTAeFw0x NjAyMDcxNjQ0MzdaFw0xNzAyMDYxNjQ0MzdaMCsxEjAQBgNVBAoTCUNvY2tyb2Fj aDEVMBMGA1UEAxMMQ29ja3JvYWNoIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A MIIBCgKCAQEAxdln3/UdgP7ayA/G1kT7upjLe4ERwQjYQ25q0e1+vgsB5jhiirxJ e0+WkhhYu/mwoSAXzvlsbZ2PWFyfdanZeD/Lh6SvIeWXVVaPcWVWL1TEcoN2jr5+ E85MMHmbbmaT2he8s6br2tM/UZxyTQ2XRprIzApbDssyw1c0Yufcpu3C6267FLEl IfcWrzDhnluFhthhtGXv3ToD8IuMScMC5qlKBXtKmD1B5x14ngO/ecNJ+OlEi0HU mavK4KWgI2rDXRZ2EnCpyTZdkc3kkRnzKcg653oOjMDRZdrhfIrha+Jq38ACsUmZ Su7Sp5jkIHOCO8Zg+l6GKVSq37dKMapD8wIDAQABoyYwJDAOBgNVHQ8BAf8EBAMC AuQwEgYDVR0TAQH/BAgwBgEB/wIBATANBgkqhkiG9w0BAQsFAAOCAQEAwZ2Tu0Yu rrSVdMdoPEjT1IZd+5OhM/SLzL0ddtvTithRweLHsw2lDQYlXFqr24i3UGZJQ1sp cqSrNwswgLUQT3vWyTjmM51HEb2vMYWKmjZ+sBQYAUP1CadrN/+OTfNGnlF1+B4w IXOzh7EvQmJJnNybLe4a/aRvj1NE2n8Z898B76SVU9WbfKKz8VwLzuIPDqkKcZda lMy5yzthyztV9YjcWs2zVOUGZvGdAhDrvZuUq6mSmxrBEvR2LBOggmVf3tGRT+Ls lW7c9Lrva5zLHuqmoPP07A+vuI9a0D1X44jwGDuPWJ5RnTOQ63Uez12mKNjqleHw DnkwNanuO8dhAA== -----END CERTIFICATE----- 
dependencies/pkg/mod/github.com/lib/pq@v1.10.9/certs/Makefile0000644000000000000000000000217215024302467022511 0ustar rootroot.PHONY: all root-ssl server-ssl client-ssl # Rebuilds self-signed root/server/client certs/keys in a consistent way all: root-ssl server-ssl client-ssl rm -f .srl root-ssl: openssl req -new -sha256 -nodes -newkey rsa:2048 \ -config ./certs/root.cnf \ -keyout /tmp/root.key \ -out /tmp/root.csr openssl x509 -req -days 3653 -sha256 \ -in /tmp/root.csr \ -extfile /etc/ssl/openssl.cnf -extensions v3_ca \ -signkey /tmp/root.key \ -out ./certs/root.crt server-ssl: openssl req -new -sha256 -nodes -newkey rsa:2048 \ -config ./certs/server.cnf \ -keyout ./certs/server.key \ -out /tmp/server.csr openssl x509 -req -days 3653 -sha256 \ -extfile ./certs/server.cnf -extensions req_ext \ -CA ./certs/root.crt -CAkey /tmp/root.key -CAcreateserial \ -in /tmp/server.csr \ -out ./certs/server.crt client-ssl: openssl req -new -sha256 -nodes -newkey rsa:2048 \ -config ./certs/postgresql.cnf \ -keyout ./certs/postgresql.key \ -out /tmp/postgresql.csr openssl x509 -req -days 3653 -sha256 \ -CA ./certs/root.crt -CAkey /tmp/root.key -CAcreateserial \ -in /tmp/postgresql.csr \ -out ./certs/postgresql.crt dependencies/pkg/mod/github.com/lib/pq@v1.10.9/certs/postgresql.crt0000644000000000000000000000224015024302467023762 0ustar rootroot-----BEGIN CERTIFICATE----- MIIDPjCCAiYCCQD4nsC6zsmIqjANBgkqhkiG9w0BAQsFADBeMQswCQYDVQQGEwJV UzEPMA0GA1UECAwGTmV2YWRhMRIwEAYDVQQHDAlMYXMgVmVnYXMxGjAYBgNVBAoM EWdpdGh1Yi5jb20vbGliL3BxMQ4wDAYDVQQDDAVwcSBDQTAeFw0yMTA5MDIwMTU1 MDJaFw0zMTA5MDMwMTU1MDJaMGQxCzAJBgNVBAYTAlVTMQ8wDQYDVQQIDAZOZXZh ZGExEjAQBgNVBAcMCUxhcyBWZWdhczEaMBgGA1UECgwRZ2l0aHViLmNvbS9saWIv cHExFDASBgNVBAMMC3BxZ29zc2xjZXJ0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A MIIBCgKCAQEAx0ucPVUNCrVmbyithwWrmmZ1dGudBwhSyDB6af4z5Cr+S6dx2SRU UGUw3Lv+z+tUqQ7hJj0oNddIQeYKl/Tt6JPpZsQfERP/cUGedtyt7HnCKobBL+0B NvHnDIUiIL4LgfiZK4DWJkGmm7nTHo/7qKAw60vCMLUW98DC0Xhlk9MHYG+e9Zai 3G0vY2X6DUYcSmzBI3JakFEgMZTQg3ofUQMz8TYeK3/DYadLXkl08d18LL3Dnefx 0xRuBPNTa2tLfVnFkfFi6Z9xVB/WhG6+X4OLnO85v5xUOGTV+g154iR7FOkrrl5F lEUBj+yaIoTRi+MyZ/oYqWwQUDYS3+Te9wIDAQABMA0GCSqGSIb3DQEBCwUAA4IB AQCCJpwUWCx7xfXv3vH3LQcffZycyRHYPgTCbiQw3x9aBb77jUAh5O6lEj/W0nx2 SCTEsCsRSAiFwfUb+g/AFCW84dELRWmf38eoqACebLymqnvxyZA+O87yu07XyFZR TnmbDMzZgsyWWGwS3JoGFk+ibWY4AImYQnSJO8Pi0kZ37ngbAyJ3RtDhhEQJWw/Q D04p3uky/ea7Gyz0QTx5o40n4gq7nEzF1OS6IHozM840J5aZrxRiXEa56fsmJHmI IGyI07SGlWJ15r1wc8lB+8ilnAqH1QQlYzTIW0Q4NZE7n3uQg1EVuueGiGO2ex2/ he9lDiJfOQuPuLbOxzctP9v9 -----END CERTIFICATE----- dependencies/pkg/mod/github.com/lib/pq@v1.10.9/certs/postgresql.key0000644000000000000000000000325015024302467023764 0ustar rootroot-----BEGIN PRIVATE KEY----- MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDHS5w9VQ0KtWZv KK2HBauaZnV0a50HCFLIMHpp/jPkKv5Lp3HZJFRQZTDcu/7P61SpDuEmPSg110hB 5gqX9O3ok+lmxB8RE/9xQZ523K3secIqhsEv7QE28ecMhSIgvguB+JkrgNYmQaab udMej/uooDDrS8IwtRb3wMLReGWT0wdgb571lqLcbS9jZfoNRhxKbMEjclqQUSAx lNCDeh9RAzPxNh4rf8Nhp0teSXTx3XwsvcOd5/HTFG4E81Nra0t9WcWR8WLpn3FU H9aEbr5fg4uc7zm/nFQ4ZNX6DXniJHsU6SuuXkWURQGP7JoihNGL4zJn+hipbBBQ NhLf5N73AgMBAAECggEAHLNY1sRO0oH5NHzpMI6yfdPPimqM/JxIP6grmOQQ2QUQ BhkhHiJLOiC4frFcKtk7IfWQmw8noUlVkJfuYp/VOy9B55jK2IzGtqq6hWeWbH3E Zpdtbtd021LO8VCi75Au3BLPDCLLtEq0Ea0bKEWX+lrHcLtCRf1uR1OtOrlZ94Wl DUhm7YJC4cS1bi6Kdf03R+fw2oFi7/QdywcT4ow032jGWOly/Jl7bSHZK7xLtM/i 9HfMwmusD/iuz7mtLU7VCpnlKZm6MfS5D427ybW8MruuiZEtQJ6QtRIrHBHk93aK Op0tjJ6tMav1UsJzgVz9+uWILE9l0AjAa4AvbfNzEQKBgQD8mma9SLQPtBb6cXuT CQgjE4vyph8mRnm/pTz3QLIpMiLy2+aKJD/u4cduzLw1vjuH1tlb7NQ9c891jAJh 
JhwDwqKAXfFicfRs/PYWngx/XtGhbbpgm1yA6XuYL1D06gzmjzXgHvZMOFcts+GF y0JEuV7v6eYrpQJRQYCwY6xTgwKBgQDJ+bHAlgOaC94DZEXZMiUznCCjBjAstiXG BEN7Cnfn6vgvPm/b6BkKn4VrsCmbZQKT7QJDSOhYwXCC2ZlrKiF8GEUHX4mi8347 8B+DsuokTLNmN61QAZbb1c3XQVnr15xH8ijm7yYs4tCBmVLKBmpw1T4IZXXlVE5k gmee+AwIfQKBgGr+P0wnclVAc4cq8CusZKzux5VEtebxbPo21CbqWUxHtzPk3rZe elIFggK1Z3bgF7kG0NQ18QQCfLoOTqe1i6IwG8KBiA+pst1DHD0iPqroj6RvpMTs qXbU7ovcZs8GH+a8fBZtJufL6WkrSvfvyybu2X6HNP4Bi4S9WPPdlA1fAoGAE5m/ vkjQoKp2KS4Z+TH8mj2UjT2Uf0JN+CGByvcBG+iZnTwZ7uVfSMCiWgkGgKYU0fY2 OgFhSvu6x3gGg3fbOAfC6yxCVyX6IibzZ/x87HjlEA5nK1R8J2lgSHt3FoQeDn1Z qs+ajNCWG32doy1sNvb6xiXSgybjVK2zEKJRyKECgYBJTk2IABebjvInNb6tagcI nD4d2LgBmZJZsTruHXrpO0s3XCQcFKks4JKH1CVjd34f7LkxzEOGbE7wKBBd652s ob6gFKnbqTniTo3NRUycB6ymo4LSaBvKgeY5hYbVxrYheRLPGY+gPVYb3VMKu9N9 76rcaFqJOz7OeywRG5bHUg== -----END PRIVATE KEY----- dependencies/pkg/mod/github.com/lib/pq@v1.10.9/certs/root.cnf0000644000000000000000000000025615024302467022525 0ustar rootroot[req] distinguished_name = req_distinguished_name prompt = no [req_distinguished_name] C = US ST = Nevada L = Las Vegas O = github.com/lib/pq CN = pq CA dependencies/pkg/mod/github.com/lib/pq@v1.10.9/certs/root.crt0000644000000000000000000000266015024302467022550 0ustar rootroot-----BEGIN CERTIFICATE----- MIIEBjCCAu6gAwIBAgIJAPizR+OD14YnMA0GCSqGSIb3DQEBCwUAMF4xCzAJBgNV BAYTAlVTMQ8wDQYDVQQIDAZOZXZhZGExEjAQBgNVBAcMCUxhcyBWZWdhczEaMBgG A1UECgwRZ2l0aHViLmNvbS9saWIvcHExDjAMBgNVBAMMBXBxIENBMB4XDTIxMDkw MjAxNTUwMloXDTMxMDkwMzAxNTUwMlowXjELMAkGA1UEBhMCVVMxDzANBgNVBAgM Bk5ldmFkYTESMBAGA1UEBwwJTGFzIFZlZ2FzMRowGAYDVQQKDBFnaXRodWIuY29t L2xpYi9wcTEOMAwGA1UEAwwFcHEgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw ggEKAoIBAQDb9d6sjdU6GdibGrXRMOHREH3MRUS8T4TFqGgPEGVDP/V5bAZlBSGP AN0o9DTyVLcbQpBt8zMTw9KeIzIIe5NIVkSmA16lw/YckGhOM+kZIkiDuE6qt5Ia OQCRMdXkZ8ejG/JUu+rHU8FJZL8DE+jyYherzdjkeVAQ7JfzxAwW2Dl7T/47g337 Pwmf17AEb8ibSqmXyUN7R5NhJQs+hvaYdNagzdx91E1H+qlyBvmiNeasUQljLvZ+ Y8wAuU79neA+d09O4PBiYwV17rSP6SZCeGE3oLZviL/0KM9Xig88oB+2FmvQ6Zxa L7SoBlqS+5pBZwpH7eee/wCIKAnJtMAJAgMBAAGjgcYwgcMwDwYDVR0TAQH/BAUw AwEB/zAdBgNVHQ4EFgQUfIXEczahbcM2cFrwclJF7GbdajkwgZAGA1UdIwSBiDCB hYAUfIXEczahbcM2cFrwclJF7GbdajmhYqRgMF4xCzAJBgNVBAYTAlVTMQ8wDQYD VQQIDAZOZXZhZGExEjAQBgNVBAcMCUxhcyBWZWdhczEaMBgGA1UECgwRZ2l0aHVi LmNvbS9saWIvcHExDjAMBgNVBAMMBXBxIENBggkA+LNH44PXhicwDQYJKoZIhvcN AQELBQADggEBABFyGgSz2mHVJqYgX1Y+7P+MfKt83cV2uYDGYvXrLG2OGiCilVul oTBG+8omIMSHOsQZvWMpA5H0tnnlQHrKpKpUyKkSL+Wv5GL0UtBmHX7mVRiaK2l4 q2BjRaQUitp/FH4NSdXtVrMME5T1JBBZHsQkNL3cNRzRKwY/Vj5UGEDxDS7lILUC e01L4oaK0iKQn4beALU+TvKoAHdPvoxpPpnhkF5ss9HmdcvRktJrKZemDJZswZ7/ +omx8ZPIYYUH5VJJYYE88S7guAt+ZaKIUlel/t6xPbo2ZySFSg9u1uB99n+jTo3L 1rAxFnN3FCX2jBqgP29xMVmisaN5k04UmyI= -----END CERTIFICATE----- dependencies/pkg/mod/github.com/lib/pq@v1.10.9/certs/server.cnf0000644000000000000000000000136715024302467023054 0ustar rootroot[ req ] default_bits = 2048 distinguished_name = subject req_extensions = req_ext x509_extensions = x509_ext string_mask = utf8only prompt = no [ subject ] C = US ST = Nevada L = Las Vegas O = github.com/lib/pq [ x509_ext ] subjectKeyIdentifier = hash authorityKeyIdentifier = keyid,issuer basicConstraints = CA:FALSE keyUsage = digitalSignature, keyEncipherment subjectAltName = DNS:postgres nsComment = "OpenSSL Generated Certificate" [ req_ext ] subjectKeyIdentifier = hash basicConstraints = CA:FALSE keyUsage = digitalSignature, keyEncipherment subjectAltName = DNS:postgres nsComment = "OpenSSL Generated Certificate" 
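# Note: the certificate generated from this config via the Makefile above is
# expected to carry subjectAltName = DNS:postgres. An optional way to confirm
# this after regenerating (a suggested check, not part of the Makefile targets) is:
#   openssl x509 -in certs/server.crt -noout -text | grep -A1 "Subject Alternative Name"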
dependencies/pkg/mod/github.com/lib/pq@v1.10.9/certs/server.key0000644000000000000000000000325015024302467023067 0ustar rootroot-----BEGIN PRIVATE KEY----- MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCn+h+FM5gDTUIk CXvdjH9xEzGJqWSjTTvQzxB8jSyJGoPl8DKwU3XAitJi77DQmKV7gTeu9NygnVgB TnjJo+4DrFrb47eV5um0Ry4HoHpF83Bv4w8Kv8Ms3g86/2JCE7iJRuOC3/ZYWBS8 Ih5Rzlh58K2WhIemlP/9NmkJM8l+SDXl5f902ZthtniB+gDsTijlNuzFLMlUkOQx PkgyhnET+GxJbnnkFr8QFzTGWG+7FUg3XxyN8699iuJP7QvMdH2SeVzf3b5eLeDe DK2JZgzuVzq2/4EB9R+IruvuRDA32WGm+H5E4ZzRXXxaruP/EnL+ymfJlqqSjHxg IxD3LUItAgMBAAECggEAOE2naQ9tIZYw2EFxikZApVcooJrtx6ropMnzHbx4NBB2 K4mChAXFj184u77ZxmGT/jzGvFcI6LE0wWNbK0NOUV7hKZk/fPhkV3AQZrAMrAu4 IVi7PwAd3JkmA8F8XuebUDA5rDGDsgL8GD9baFJA58abeLs9eMGyuF4XgOUh4bip hgHa76O2rcDWNY5HZqqRslw75FzlYkB0PCts/UJxSswj70kTTihyOhDlrm2TnyxI ne54UbGRrpfs9wiheSGLjDG81qZToBHQDwoAnjjZhu1VCaBISuGbgZrxyyRyqdnn xPW+KczMv04XyvF7v6Pz+bUEppalLXGiXnH5UtWvZQKBgQDTPCdMpNE/hwlq4nAw Kf42zIBWfbnMLVWYoeDiAOhtl9XAUAXn76xe6Rvo0qeAo67yejdbJfRq3HvGyw+q 4PS8r9gXYmLYIPQxSoLL5+rFoBCN3qFippfjLB1j32mp7+15KjRj8FF2r6xIN8fu XatSRsaqmvCWYLDRv/rbHnxwkwKBgQDLkyfFLF7BtwtPWKdqrwOM7ip1UKh+oDBS vkCQ08aEFRBU7T3jChsx5GbaW6zmsSBwBwcrHclpSkz7n3aq19DDWObJR2p80Fma rsXeIcvtEpkvT3pVX268P5d+XGs1kxgFunqTysG9yChW+xzcs5MdKBzuMPPn7rL8 MKAzdar6PwKBgEypkzW8x3h/4Moa3k6MnwdyVs2NGaZheaRIc95yJ+jGZzxBjrMr h+p2PbvU4BfO0AqOkpKRBtDVrlJqlggVVp04UHvEKE16QEW3Xhr0037f5cInX3j3 Lz6yXwRFLAsR2aTUzWjL6jTh8uvO2s/GzQuyRh3a16Ar/WBShY+K0+zjAoGATnLT xZjWnyHRmu8X/PWakamJ9RFzDPDgDlLAgM8LVgTj+UY/LgnL9wsEU6s2UuP5ExKy QXxGDGwUhHar/SQTj+Pnc7Mwpw6HKSOmnnY5po8fNusSwml3O9XppEkrC0c236Y/ 7EobJO5IFVTJh4cv7vFxTJzSsRL8KFD4uzvh+nMCgYEAqY8NBYtIgNJA2B6C6hHF +bG7v46434ZHFfGTmMQwzE4taVg7YRnzYESAlvK4bAP5ZXR90n7GRGFhrXzoMZ38 r0bw/q9rV+ReGda7/Bjf7ciCKiq0RODcHtf4IaskjPXCoQRGJtgCPLhWPfld6g9v /HTvO96xv9e3eG/PKSPog94= -----END PRIVATE KEY----- dependencies/pkg/mod/github.com/lib/pq@v1.10.9/certs/README0000644000000000000000000000025415024302467021730 0ustar rootrootThis directory contains certificates and private keys for testing some SSL-related functionality in Travis. Do NOT use these certificates for anything other than testing. dependencies/pkg/mod/github.com/lib/pq@v1.10.9/TESTS.md0000644000000000000000000000122215024302467021150 0ustar rootroot# Tests ## Running Tests `go test` is used for testing. A running PostgreSQL server is required, with the ability to log in. The database to connect to test with is "pqgotest," on "localhost" but these can be overridden using [environment variables](https://www.postgresql.org/docs/9.3/static/libpq-envars.html). Example: PGHOST=/run/postgresql go test ## Benchmarks A benchmark suite can be run as part of the tests: go test -bench . 
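To run only the benchmarks, or a single benchmark, the standard `go test` selectors apply; for example, to run just `BenchmarkSelectSeries` from `bench_test.go` while skipping the tests:

```
go test -run '^$' -bench BenchmarkSelectSeries
```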
## Example setup (Docker) Run a postgres container (publish port 5432 and allow passwordless connections so the test command below can reach it): ``` docker run -d -e POSTGRES_HOST_AUTH_METHOD=trust -p 5432:5432 postgres ``` Run tests: ``` PGHOST=localhost PGPORT=5432 PGUSER=postgres PGSSLMODE=disable PGDATABASE=postgres go test ``` dependencies/pkg/mod/github.com/lib/pq@v1.10.9/copy.go0000644000000000000000000001674215024302467021242 0ustar rootrootpackage pq import ( "bytes" "context" "database/sql/driver" "encoding/binary" "errors" "fmt" "sync" ) var ( errCopyInClosed = errors.New("pq: copyin statement has already been closed") errBinaryCopyNotSupported = errors.New("pq: only text format supported for COPY") errCopyToNotSupported = errors.New("pq: COPY TO is not supported") errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction") errCopyInProgress = errors.New("pq: COPY in progress") ) // CopyIn creates a COPY FROM statement which can be prepared with // Tx.Prepare(). The target table should be visible in search_path. func CopyIn(table string, columns ...string) string { buffer := bytes.NewBufferString("COPY ") BufferQuoteIdentifier(table, buffer) buffer.WriteString(" (") makeStmt(buffer, columns...) return buffer.String() } // makeStmt makes the stmt string for CopyIn and CopyInSchema. func makeStmt(buffer *bytes.Buffer, columns ...string) { //s := bytes.NewBufferString() for i, col := range columns { if i != 0 { buffer.WriteString(", ") } BufferQuoteIdentifier(col, buffer) } buffer.WriteString(") FROM STDIN") } // CopyInSchema creates a COPY FROM statement which can be prepared with // Tx.Prepare(). func CopyInSchema(schema, table string, columns ...string) string { buffer := bytes.NewBufferString("COPY ") BufferQuoteIdentifier(schema, buffer) buffer.WriteRune('.') BufferQuoteIdentifier(table, buffer) buffer.WriteString(" (") makeStmt(buffer, columns...)
return buffer.String() } type copyin struct { cn *conn buffer []byte rowData chan []byte done chan bool closed bool mu struct { sync.Mutex err error driver.Result } } const ciBufferSize = 64 * 1024 // flush buffer before the buffer is filled up and needs reallocation const ciBufferFlushSize = 63 * 1024 func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) { if !cn.isInTransaction() { return nil, errCopyNotSupportedOutsideTxn } ci := &copyin{ cn: cn, buffer: make([]byte, 0, ciBufferSize), rowData: make(chan []byte), done: make(chan bool, 1), } // add CopyData identifier + 4 bytes for message length ci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0) b := cn.writeBuf('Q') b.string(q) cn.send(b) awaitCopyInResponse: for { t, r := cn.recv1() switch t { case 'G': if r.byte() != 0 { err = errBinaryCopyNotSupported break awaitCopyInResponse } go ci.resploop() return ci, nil case 'H': err = errCopyToNotSupported break awaitCopyInResponse case 'E': err = parseError(r) case 'Z': if err == nil { ci.setBad(driver.ErrBadConn) errorf("unexpected ReadyForQuery in response to COPY") } cn.processReadyForQuery(r) return nil, err default: ci.setBad(driver.ErrBadConn) errorf("unknown response for copy query: %q", t) } } // something went wrong, abort COPY before we return b = cn.writeBuf('f') b.string(err.Error()) cn.send(b) for { t, r := cn.recv1() switch t { case 'c', 'C', 'E': case 'Z': // correctly aborted, we're done cn.processReadyForQuery(r) return nil, err default: ci.setBad(driver.ErrBadConn) errorf("unknown response for CopyFail: %q", t) } } } func (ci *copyin) flush(buf []byte) { // set message length (without message identifier) binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1)) _, err := ci.cn.c.Write(buf) if err != nil { panic(err) } } func (ci *copyin) resploop() { for { var r readBuf t, err := ci.cn.recvMessage(&r) if err != nil { ci.setBad(driver.ErrBadConn) ci.setError(err) ci.done <- true return } switch t { case 'C': // complete res, _ := ci.cn.parseComplete(r.string()) ci.setResult(res) case 'N': if n := ci.cn.noticeHandler; n != nil { n(parseError(&r)) } case 'Z': ci.cn.processReadyForQuery(&r) ci.done <- true return case 'E': err := parseError(&r) ci.setError(err) default: ci.setBad(driver.ErrBadConn) ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t)) ci.done <- true return } } } func (ci *copyin) setBad(err error) { ci.cn.err.set(err) } func (ci *copyin) getBad() error { return ci.cn.err.get() } func (ci *copyin) err() error { ci.mu.Lock() err := ci.mu.err ci.mu.Unlock() return err } // setError() sets ci.err if one has not been set already. Caller must not be // holding ci.Mutex. func (ci *copyin) setError(err error) { ci.mu.Lock() if ci.mu.err == nil { ci.mu.err = err } ci.mu.Unlock() } func (ci *copyin) setResult(result driver.Result) { ci.mu.Lock() ci.mu.Result = result ci.mu.Unlock() } func (ci *copyin) getResult() driver.Result { ci.mu.Lock() result := ci.mu.Result ci.mu.Unlock() if result == nil { return driver.RowsAffected(0) } return result } func (ci *copyin) NumInput() int { return -1 } func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) { return nil, ErrNotSupported } // Exec inserts values into the COPY stream. The insert is asynchronous // and Exec can return errors from previous Exec calls to the same // COPY stmt. // // You need to call Exec(nil) to sync the COPY stream and to get any // errors from pending data, since Stmt.Close() doesn't return errors // to the user.
func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) { if ci.closed { return nil, errCopyInClosed } if err := ci.getBad(); err != nil { return nil, err } defer ci.cn.errRecover(&err) if err := ci.err(); err != nil { return nil, err } if len(v) == 0 { if err := ci.Close(); err != nil { return driver.RowsAffected(0), err } return ci.getResult(), nil } numValues := len(v) for i, value := range v { ci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value) if i < numValues-1 { ci.buffer = append(ci.buffer, '\t') } } ci.buffer = append(ci.buffer, '\n') if len(ci.buffer) > ciBufferFlushSize { ci.flush(ci.buffer) // reset buffer, keep bytes for message identifier and length ci.buffer = ci.buffer[:5] } return driver.RowsAffected(0), nil } // CopyData inserts a raw string into the COPY stream. The insert is // asynchronous and CopyData can return errors from previous CopyData calls to // the same COPY stmt. // // You need to call Exec(nil) to sync the COPY stream and to get any // errors from pending data, since Stmt.Close() doesn't return errors // to the user. func (ci *copyin) CopyData(ctx context.Context, line string) (r driver.Result, err error) { if ci.closed { return nil, errCopyInClosed } if finish := ci.cn.watchCancel(ctx); finish != nil { defer finish() } if err := ci.getBad(); err != nil { return nil, err } defer ci.cn.errRecover(&err) if err := ci.err(); err != nil { return nil, err } ci.buffer = append(ci.buffer, []byte(line)...) ci.buffer = append(ci.buffer, '\n') if len(ci.buffer) > ciBufferFlushSize { ci.flush(ci.buffer) // reset buffer, keep bytes for message identifier and length ci.buffer = ci.buffer[:5] } return driver.RowsAffected(0), nil } func (ci *copyin) Close() (err error) { if ci.closed { // Don't do anything, we're already closed return nil } ci.closed = true if err := ci.getBad(); err != nil { return err } defer ci.cn.errRecover(&err) if len(ci.buffer) > 0 { ci.flush(ci.buffer) } // Avoid touching the scratch buffer as resploop could be using it. 
err = ci.cn.sendSimpleMessage('c') if err != nil { return err } <-ci.done ci.cn.inCopy = false if err := ci.err(); err != nil { return err } return nil } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/uuid_test.go0000644000000000000000000000207615024302467022270 0ustar rootrootpackage pq import ( "reflect" "strings" "testing" ) func TestDecodeUUIDBinaryError(t *testing.T) { t.Parallel() _, err := decodeUUIDBinary([]byte{0x12, 0x34}) if err == nil { t.Fatal("Expected error, got none") } if !strings.HasPrefix(err.Error(), "pq:") { t.Errorf("Expected error to start with %q, got %q", "pq:", err.Error()) } if !strings.Contains(err.Error(), "bad length: 2") { t.Errorf("Expected error to contain length, got %q", err.Error()) } } func BenchmarkDecodeUUIDBinary(b *testing.B) { x := []byte{0x03, 0xa3, 0x52, 0x2f, 0x89, 0x28, 0x49, 0x87, 0x84, 0xd6, 0x93, 0x7b, 0x36, 0xec, 0x27, 0x6f} for i := 0; i < b.N; i++ { decodeUUIDBinary(x) } } func TestDecodeUUIDBackend(t *testing.T) { db := openTestConn(t) defer db.Close() var s = "a0ecc91d-a13f-4fe4-9fce-7e09777cc70a" var scanned interface{} err := db.QueryRow(`SELECT $1::uuid`, s).Scan(&scanned) if err != nil { t.Fatalf("Expected no error, got %v", err) } if !reflect.DeepEqual(scanned, []byte(s)) { t.Errorf("Expected []byte(%q), got %T(%q)", s, scanned, scanned) } } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/ssl.go0000644000000000000000000001447115024302467021066 0ustar rootrootpackage pq import ( "crypto/tls" "crypto/x509" "io/ioutil" "net" "os" "os/user" "path/filepath" "strings" ) // ssl generates a function to upgrade a net.Conn based on the "sslmode" and // related settings. The function is nil when no upgrade should take place. func ssl(o values) (func(net.Conn) (net.Conn, error), error) { verifyCaOnly := false tlsConf := tls.Config{} switch mode := o["sslmode"]; mode { // "require" is the default. case "", "require": // We must skip TLS's own verification since it requires full // verification since Go 1.3. tlsConf.InsecureSkipVerify = true // From http://www.postgresql.org/docs/current/static/libpq-ssl.html: // // Note: For backwards compatibility with earlier versions of // PostgreSQL, if a root CA file exists, the behavior of // sslmode=require will be the same as that of verify-ca, meaning the // server certificate is validated against the CA. Relying on this // behavior is discouraged, and applications that need certificate // validation should always use verify-ca or verify-full. if sslrootcert, ok := o["sslrootcert"]; ok { if _, err := os.Stat(sslrootcert); err == nil { verifyCaOnly = true } else { delete(o, "sslrootcert") } } case "verify-ca": // We must skip TLS's own verification since it requires full // verification since Go 1.3. tlsConf.InsecureSkipVerify = true verifyCaOnly = true case "verify-full": tlsConf.ServerName = o["host"] case "disable": return nil, nil default: return nil, fmterrorf(`unsupported sslmode %q; only "require" (default), "verify-full", "verify-ca", and "disable" supported`, mode) } // Set Server Name Indication (SNI), if enabled by connection parameters. // By default SNI is on, any value which is not starting with "1" disables // SNI -- that is the same check vanilla libpq uses. if sslsni := o["sslsni"]; sslsni == "" || strings.HasPrefix(sslsni, "1") { // RFC 6066 asks to not set SNI if the host is a literal IP address (IPv4 // or IPv6). This check is coded already crypto.tls.hostnameInSNI, so // just always set ServerName here and let crypto/tls do the filtering. 
tlsConf.ServerName = o["host"] } err := sslClientCertificates(&tlsConf, o) if err != nil { return nil, err } err = sslCertificateAuthority(&tlsConf, o) if err != nil { return nil, err } // Accept renegotiation requests initiated by the backend. // // Renegotiation was deprecated then removed from PostgreSQL 9.5, but // the default configuration of older versions has it enabled. Redshift // also initiates renegotiations and cannot be reconfigured. tlsConf.Renegotiation = tls.RenegotiateFreelyAsClient return func(conn net.Conn) (net.Conn, error) { client := tls.Client(conn, &tlsConf) if verifyCaOnly { err := sslVerifyCertificateAuthority(client, &tlsConf) if err != nil { return nil, err } } return client, nil }, nil } // sslClientCertificates adds the certificate specified in the "sslcert" and // "sslkey" settings, or if they aren't set, from the .postgresql directory // in the user's home directory. The configured files must exist and have // the correct permissions. func sslClientCertificates(tlsConf *tls.Config, o values) error { sslinline := o["sslinline"] if sslinline == "true" { cert, err := tls.X509KeyPair([]byte(o["sslcert"]), []byte(o["sslkey"])) if err != nil { return err } tlsConf.Certificates = []tls.Certificate{cert} return nil } // user.Current() might fail when cross-compiling. We have to ignore the // error and continue without home directory defaults, since we wouldn't // know from where to load them. user, _ := user.Current() // In libpq, the client certificate is only loaded if the setting is not blank. // // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1036-L1037 sslcert := o["sslcert"] if len(sslcert) == 0 && user != nil { sslcert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt") } // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1045 if len(sslcert) == 0 { return nil } // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1050:L1054 if _, err := os.Stat(sslcert); os.IsNotExist(err) { return nil } else if err != nil { return err } // In libpq, the ssl key is only loaded if the setting is not blank. // // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1123-L1222 sslkey := o["sslkey"] if len(sslkey) == 0 && user != nil { sslkey = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key") } if len(sslkey) > 0 { if err := sslKeyPermissions(sslkey); err != nil { return err } } cert, err := tls.LoadX509KeyPair(sslcert, sslkey) if err != nil { return err } tlsConf.Certificates = []tls.Certificate{cert} return nil } // sslCertificateAuthority adds the RootCA specified in the "sslrootcert" setting. func sslCertificateAuthority(tlsConf *tls.Config, o values) error { // In libpq, the root certificate is only loaded if the setting is not blank. // // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L950-L951 if sslrootcert := o["sslrootcert"]; len(sslrootcert) > 0 { tlsConf.RootCAs = x509.NewCertPool() sslinline := o["sslinline"] var cert []byte if sslinline == "true" { cert = []byte(sslrootcert) } else { var err error cert, err = ioutil.ReadFile(sslrootcert) if err != nil { return err } } if !tlsConf.RootCAs.AppendCertsFromPEM(cert) { return fmterrorf("couldn't parse pem in sslrootcert") } } return nil } // sslVerifyCertificateAuthority carries out a TLS handshake to the server and // verifies the presented certificate against the CA, i.e. 
the one specified in // sslrootcert or the system CA if sslrootcert was not specified. func sslVerifyCertificateAuthority(client *tls.Conn, tlsConf *tls.Config) error { err := client.Handshake() if err != nil { return err } certs := client.ConnectionState().PeerCertificates opts := x509.VerifyOptions{ DNSName: client.ConnectionState().ServerName, Intermediates: x509.NewCertPool(), Roots: tlsConf.RootCAs, } for i, cert := range certs { if i == 0 { continue } opts.Intermediates.AddCert(cert) } _, err = certs[0].Verify(opts) return err } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/go19_test.go0000644000000000000000000000412015024302467022071 0ustar rootroot//go:build go1.9 // +build go1.9 package pq import ( "context" "database/sql" "database/sql/driver" "reflect" "testing" ) func TestPing(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) db := openTestConn(t) defer db.Close() if _, ok := reflect.TypeOf(db).MethodByName("Conn"); !ok { t.Skipf("Conn method undefined on type %T, skipping test (requires at least go1.9)", db) } if err := db.PingContext(ctx); err != nil { t.Fatal("expected Ping to succeed") } defer cancel() // grab a connection conn, err := db.Conn(ctx) if err != nil { t.Fatal(err) } // start a transaction and read backend pid of our connection tx, err := conn.BeginTx(ctx, &sql.TxOptions{ Isolation: sql.LevelDefault, ReadOnly: true, }) if err != nil { t.Fatal(err) } rows, err := tx.Query("SELECT pg_backend_pid()") if err != nil { t.Fatal(err) } defer rows.Close() // read the pid from result var pid int for rows.Next() { if err := rows.Scan(&pid); err != nil { t.Fatal(err) } } if rows.Err() != nil { t.Fatal(err) } // Fail the transaction and make sure we can still ping. if _, err := tx.Query("INVALID SQL"); err == nil { t.Fatal("expected error") } if err := conn.PingContext(ctx); err != nil { t.Fatal(err) } if err := tx.Rollback(); err != nil { t.Fatal(err) } // kill the process which handles our connection and test if the ping fails if _, err := db.Exec("SELECT pg_terminate_backend($1)", pid); err != nil { t.Fatal(err) } if err := conn.PingContext(ctx); err != driver.ErrBadConn { t.Fatalf("expected error %s, instead got %s", driver.ErrBadConn, err) } } func TestCommitInFailedTransactionWithCancelContext(t *testing.T) { db := openTestConn(t) defer db.Close() ctx, cancel := context.WithCancel(context.Background()) defer cancel() txn, err := db.BeginTx(ctx, nil) if err != nil { t.Fatal(err) } rows, err := txn.Query("SELECT error") if err == nil { rows.Close() t.Fatal("expected failure") } err = txn.Commit() if err != ErrInFailedTransaction { t.Fatalf("expected ErrInFailedTransaction; got %#v", err) } } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/LICENSE.md0000644000000000000000000000212615024302467021334 0ustar rootrootCopyright (c) 2011-2013, 'pq' Contributors Portions Copyright (C) 2011 Blake Mizerany Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. dependencies/pkg/mod/github.com/lib/pq@v1.10.9/ssl_test.go0000644000000000000000000002737015024302467022127 0ustar rootrootpackage pq // This file contains SSL tests import ( "bytes" _ "crypto/sha256" "crypto/tls" "crypto/x509" "database/sql" "fmt" "io" "net" "os" "path/filepath" "strings" "testing" "time" ) func maybeSkipSSLTests(t *testing.T) { // Require some special variables for testing certificates if os.Getenv("PQSSLCERTTEST_PATH") == "" { t.Skip("PQSSLCERTTEST_PATH not set, skipping SSL tests") } value := os.Getenv("PQGOSSLTESTS") if value == "" || value == "0" { t.Skip("PQGOSSLTESTS not enabled, skipping SSL tests") } else if value != "1" { t.Fatalf("unexpected value %q for PQGOSSLTESTS", value) } } func openSSLConn(t *testing.T, conninfo string) (*sql.DB, error) { db, err := openTestConnConninfo(conninfo) if err != nil { // should never fail t.Fatal(err) } // Do something with the connection to see whether it's working or not. tx, err := db.Begin() if err == nil { return db, tx.Rollback() } _ = db.Close() return nil, err } func checkSSLSetup(t *testing.T, conninfo string) { _, err := openSSLConn(t, conninfo) if pge, ok := err.(*Error); ok { if pge.Code.Name() != "invalid_authorization_specification" { t.Fatalf("unexpected error code '%s'", pge.Code.Name()) } } else { t.Fatalf("expected %T, got %v", (*Error)(nil), err) } } // Connect over SSL and run a simple query to test the basics func TestSSLConnection(t *testing.T) { maybeSkipSSLTests(t) // Environment sanity check: should fail without SSL checkSSLSetup(t, "sslmode=disable user=pqgossltest") db, err := openSSLConn(t, "sslmode=require user=pqgossltest") if err != nil { t.Fatal(err) } rows, err := db.Query("SELECT 1") if err != nil { t.Fatal(err) } rows.Close() } // Test sslmode=verify-full func TestSSLVerifyFull(t *testing.T) { maybeSkipSSLTests(t) // Environment sanity check: should fail without SSL checkSSLSetup(t, "sslmode=disable user=pqgossltest") // Not OK according to the system CA _, err := openSSLConn(t, "host=postgres sslmode=verify-full user=pqgossltest") if err == nil { t.Fatal("expected error") } _, ok := err.(x509.UnknownAuthorityError) if !ok { _, ok := err.(x509.HostnameError) if !ok { t.Fatalf("expected x509.UnknownAuthorityError or x509.HostnameError, got %#+v", err) } } rootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "root.crt") rootCert := "sslrootcert=" + rootCertPath + " " // No match on Common Name _, err = openSSLConn(t, rootCert+"host=127.0.0.1 sslmode=verify-full user=pqgossltest") if err == nil { t.Fatal("expected error") } _, ok = err.(x509.HostnameError) if !ok { t.Fatalf("expected x509.HostnameError, got %#+v", err) } // OK _, err = openSSLConn(t, rootCert+"host=postgres sslmode=verify-full user=pqgossltest") if err != nil { t.Fatal(err) } } // Test sslmode=require sslrootcert=rootCertPath func TestSSLRequireWithRootCert(t *testing.T) { maybeSkipSSLTests(t) // Environment sanity check: should fail without SSL checkSSLSetup(t, "sslmode=disable user=pqgossltest") bogusRootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), 
"bogus_root.crt") bogusRootCert := "sslrootcert=" + bogusRootCertPath + " " // Not OK according to the bogus CA _, err := openSSLConn(t, bogusRootCert+"host=postgres sslmode=require user=pqgossltest") if err == nil { t.Fatal("expected error") } _, ok := err.(x509.UnknownAuthorityError) if !ok { t.Fatalf("expected x509.UnknownAuthorityError, got %s, %#+v", err, err) } nonExistentCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "non_existent.crt") nonExistentCert := "sslrootcert=" + nonExistentCertPath + " " // No match on Common Name, but that's OK because we're not validating anything. _, err = openSSLConn(t, nonExistentCert+"host=127.0.0.1 sslmode=require user=pqgossltest") if err != nil { t.Fatal(err) } rootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "root.crt") rootCert := "sslrootcert=" + rootCertPath + " " // No match on Common Name, but that's OK because we're not validating the CN. _, err = openSSLConn(t, rootCert+"host=127.0.0.1 sslmode=require user=pqgossltest") if err != nil { t.Fatal(err) } // Everything OK _, err = openSSLConn(t, rootCert+"host=postgres sslmode=require user=pqgossltest") if err != nil { t.Fatal(err) } } // Test sslmode=verify-ca func TestSSLVerifyCA(t *testing.T) { maybeSkipSSLTests(t) // Environment sanity check: should fail without SSL checkSSLSetup(t, "sslmode=disable user=pqgossltest") // Not OK according to the system CA { _, err := openSSLConn(t, "host=postgres sslmode=verify-ca user=pqgossltest") if _, ok := err.(x509.UnknownAuthorityError); !ok { t.Fatalf("expected %T, got %#+v", x509.UnknownAuthorityError{}, err) } } // Still not OK according to the system CA; empty sslrootcert is treated as unspecified. { _, err := openSSLConn(t, "host=postgres sslmode=verify-ca user=pqgossltest sslrootcert=''") if _, ok := err.(x509.UnknownAuthorityError); !ok { t.Fatalf("expected %T, got %#+v", x509.UnknownAuthorityError{}, err) } } rootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "root.crt") rootCert := "sslrootcert=" + rootCertPath + " " // No match on Common Name, but that's OK if _, err := openSSLConn(t, rootCert+"host=127.0.0.1 sslmode=verify-ca user=pqgossltest"); err != nil { t.Fatal(err) } // Everything OK if _, err := openSSLConn(t, rootCert+"host=postgres sslmode=verify-ca user=pqgossltest"); err != nil { t.Fatal(err) } } // Authenticate over SSL using client certificates func TestSSLClientCertificates(t *testing.T) { maybeSkipSSLTests(t) // Environment sanity check: should fail without SSL checkSSLSetup(t, "sslmode=disable user=pqgossltest") const baseinfo = "sslmode=require user=pqgosslcert" // Certificate not specified, should fail { _, err := openSSLConn(t, baseinfo) if pge, ok := err.(*Error); ok { if pge.Code.Name() != "invalid_authorization_specification" { t.Fatalf("unexpected error code '%s'", pge.Code.Name()) } } else { t.Fatalf("expected %T, got %v", (*Error)(nil), err) } } // Empty certificate specified, should fail { _, err := openSSLConn(t, baseinfo+" sslcert=''") if pge, ok := err.(*Error); ok { if pge.Code.Name() != "invalid_authorization_specification" { t.Fatalf("unexpected error code '%s'", pge.Code.Name()) } } else { t.Fatalf("expected %T, got %v", (*Error)(nil), err) } } // Non-existent certificate specified, should fail { _, err := openSSLConn(t, baseinfo+" sslcert=/tmp/filedoesnotexist") if pge, ok := err.(*Error); ok { if pge.Code.Name() != "invalid_authorization_specification" { t.Fatalf("unexpected error code '%s'", pge.Code.Name()) } } else { t.Fatalf("expected %T, got %v", (*Error)(nil), 
err) } } certpath, ok := os.LookupEnv("PQSSLCERTTEST_PATH") if !ok { t.Fatalf("PQSSLCERTTEST_PATH not present in environment") } sslcert := filepath.Join(certpath, "postgresql.crt") // Cert present, key not specified, should fail { _, err := openSSLConn(t, baseinfo+" sslcert="+sslcert) if _, ok := err.(*os.PathError); !ok { t.Fatalf("expected %T, got %#+v", (*os.PathError)(nil), err) } } // Cert present, empty key specified, should fail { _, err := openSSLConn(t, baseinfo+" sslcert="+sslcert+" sslkey=''") if _, ok := err.(*os.PathError); !ok { t.Fatalf("expected %T, got %#+v", (*os.PathError)(nil), err) } } // Cert present, non-existent key, should fail { _, err := openSSLConn(t, baseinfo+" sslcert="+sslcert+" sslkey=/tmp/filedoesnotexist") if _, ok := err.(*os.PathError); !ok { t.Fatalf("expected %T, got %#+v", (*os.PathError)(nil), err) } } // Key has wrong permissions (passing the cert as the key), should fail if _, err := openSSLConn(t, baseinfo+" sslcert="+sslcert+" sslkey="+sslcert); err != ErrSSLKeyHasWorldPermissions { t.Fatalf("expected %s, got %#+v", ErrSSLKeyHasWorldPermissions, err) } sslkey := filepath.Join(certpath, "postgresql.key") // Should work if db, err := openSSLConn(t, baseinfo+" sslcert="+sslcert+" sslkey="+sslkey); err != nil { t.Fatal(err) } else { rows, err := db.Query("SELECT 1") if err != nil { t.Fatal(err) } if err := rows.Close(); err != nil { t.Fatal(err) } if err := db.Close(); err != nil { t.Fatal(err) } } } // Check that clint sends SNI data when `sslsni` is not disabled func TestSNISupport(t *testing.T) { t.Parallel() tests := []struct { name string conn_param string hostname string expected_sni string }{ { name: "SNI is set by default", conn_param: "", hostname: "localhost", expected_sni: "localhost", }, { name: "SNI is passed when asked for", conn_param: "sslsni=1", hostname: "localhost", expected_sni: "localhost", }, { name: "SNI is not passed when disabled", conn_param: "sslsni=0", hostname: "localhost", expected_sni: "", }, { name: "SNI is not set for IPv4", conn_param: "", hostname: "127.0.0.1", expected_sni: "", }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() // Start mock postgres server on OS-provided port listener, err := net.Listen("tcp", "127.0.0.1:") if err != nil { t.Fatal(err) } serverErrChan := make(chan error, 1) serverSNINameChan := make(chan string, 1) go mockPostgresSSL(listener, serverErrChan, serverSNINameChan) defer listener.Close() defer close(serverErrChan) defer close(serverSNINameChan) // Try to establish a connection with the mock server. Connection will error out after TLS // clientHello, but it is enough to catch SNI data on the server side port := strings.Split(listener.Addr().String(), ":")[1] connStr := fmt.Sprintf("sslmode=require host=%s port=%s %s", tt.hostname, port, tt.conn_param) // We are okay to skip this error as we are polling serverErrChan and we'll get an error // or timeout from the server side in case of problems here. db, _ := sql.Open("postgres", connStr) _, _ = db.Exec("SELECT 1") // Check SNI data select { case sniHost := <-serverSNINameChan: if sniHost != tt.expected_sni { t.Fatalf("Expected SNI to be 'localhost', got '%+v' instead", sniHost) } case err = <-serverErrChan: t.Fatalf("mock server failed with error: %+v", err) case <-time.After(time.Second): t.Fatal("exceeded connection timeout without erroring out") } }) } } // Make a postgres mock server to test TLS SNI // // Accepts postgres StartupMessage and handles TLS clientHello, then closes a connection. 
// While reading clientHello catch passed SNI data and report it to nameChan. func mockPostgresSSL(listener net.Listener, errChan chan error, nameChan chan string) { var sniHost string conn, err := listener.Accept() if err != nil { errChan <- err return } defer conn.Close() err = conn.SetDeadline(time.Now().Add(time.Second)) if err != nil { errChan <- err return } // Receive StartupMessage with SSL Request startupMessage := make([]byte, 8) if _, err := io.ReadFull(conn, startupMessage); err != nil { errChan <- err return } // StartupMessage: first four bytes -- total len = 8, last four bytes SslRequestNumber if !bytes.Equal(startupMessage, []byte{0, 0, 0, 0x8, 0x4, 0xd2, 0x16, 0x2f}) { errChan <- fmt.Errorf("unexpected startup message: %#v", startupMessage) return } // Respond with SSLOk _, err = conn.Write([]byte("S")) if err != nil { errChan <- err return } // Set up TLS context to catch clientHello. It will always error out during handshake // as no certificate is set. srv := tls.Server(conn, &tls.Config{ GetConfigForClient: func(argHello *tls.ClientHelloInfo) (*tls.Config, error) { sniHost = argHello.ServerName return nil, nil }, }) defer srv.Close() // Do the TLS handshake ignoring errors _ = srv.Handshake() nameChan <- sniHost } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/go.mod0000644000000000000000000000004215024302467021031 0ustar rootrootmodule github.com/lib/pq go 1.13 dependencies/pkg/mod/github.com/lib/pq@v1.10.9/oid/0000755000000000000000000000000015024302467020502 5ustar rootrootdependencies/pkg/mod/github.com/lib/pq@v1.10.9/oid/doc.go0000644000000000000000000000021115024302467021570 0ustar rootroot// Package oid contains OID constants // as defined by the Postgres server. package oid // Oid is a Postgres Object ID. type Oid uint32 dependencies/pkg/mod/github.com/lib/pq@v1.10.9/oid/gen.go0000644000000000000000000000331615024302467021605 0ustar rootroot//go:build ignore // +build ignore // Generate the table of OID values // Run with 'go run gen.go'. package main import ( "database/sql" "fmt" "log" "os" "os/exec" "strings" _ "github.com/lib/pq" ) // OID represent a postgres Object Identifier Type. type OID struct { ID int Type string } // Name returns an upper case version of the oid type. func (o OID) Name() string { return strings.ToUpper(o.Type) } func main() { datname := os.Getenv("PGDATABASE") sslmode := os.Getenv("PGSSLMODE") if datname == "" { os.Setenv("PGDATABASE", "pqgotest") } if sslmode == "" { os.Setenv("PGSSLMODE", "disable") } db, err := sql.Open("postgres", "") if err != nil { log.Fatal(err) } rows, err := db.Query(` SELECT typname, oid FROM pg_type WHERE oid < 10000 ORDER BY oid; `) if err != nil { log.Fatal(err) } oids := make([]*OID, 0) for rows.Next() { var oid OID if err = rows.Scan(&oid.Type, &oid.ID); err != nil { log.Fatal(err) } oids = append(oids, &oid) } if err = rows.Err(); err != nil { log.Fatal(err) } cmd := exec.Command("gofmt") cmd.Stderr = os.Stderr w, err := cmd.StdinPipe() if err != nil { log.Fatal(err) } f, err := os.Create("types.go") if err != nil { log.Fatal(err) } cmd.Stdout = f err = cmd.Start() if err != nil { log.Fatal(err) } fmt.Fprintln(w, "// Code generated by gen.go. 
DO NOT EDIT.") fmt.Fprintln(w, "\npackage oid") fmt.Fprintln(w, "const (") for _, oid := range oids { fmt.Fprintf(w, "T_%s Oid = %d\n", oid.Type, oid.ID) } fmt.Fprintln(w, ")") fmt.Fprintln(w, "var TypeName = map[Oid]string{") for _, oid := range oids { fmt.Fprintf(w, "T_%s: \"%s\",\n", oid.Type, oid.Name()) } fmt.Fprintln(w, "}") w.Close() cmd.Wait() } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/oid/types.go0000644000000000000000000002457615024302467022213 0ustar rootroot// Code generated by gen.go. DO NOT EDIT. package oid const ( T_bool Oid = 16 T_bytea Oid = 17 T_char Oid = 18 T_name Oid = 19 T_int8 Oid = 20 T_int2 Oid = 21 T_int2vector Oid = 22 T_int4 Oid = 23 T_regproc Oid = 24 T_text Oid = 25 T_oid Oid = 26 T_tid Oid = 27 T_xid Oid = 28 T_cid Oid = 29 T_oidvector Oid = 30 T_pg_ddl_command Oid = 32 T_pg_type Oid = 71 T_pg_attribute Oid = 75 T_pg_proc Oid = 81 T_pg_class Oid = 83 T_json Oid = 114 T_xml Oid = 142 T__xml Oid = 143 T_pg_node_tree Oid = 194 T__json Oid = 199 T_smgr Oid = 210 T_index_am_handler Oid = 325 T_point Oid = 600 T_lseg Oid = 601 T_path Oid = 602 T_box Oid = 603 T_polygon Oid = 604 T_line Oid = 628 T__line Oid = 629 T_cidr Oid = 650 T__cidr Oid = 651 T_float4 Oid = 700 T_float8 Oid = 701 T_abstime Oid = 702 T_reltime Oid = 703 T_tinterval Oid = 704 T_unknown Oid = 705 T_circle Oid = 718 T__circle Oid = 719 T_money Oid = 790 T__money Oid = 791 T_macaddr Oid = 829 T_inet Oid = 869 T__bool Oid = 1000 T__bytea Oid = 1001 T__char Oid = 1002 T__name Oid = 1003 T__int2 Oid = 1005 T__int2vector Oid = 1006 T__int4 Oid = 1007 T__regproc Oid = 1008 T__text Oid = 1009 T__tid Oid = 1010 T__xid Oid = 1011 T__cid Oid = 1012 T__oidvector Oid = 1013 T__bpchar Oid = 1014 T__varchar Oid = 1015 T__int8 Oid = 1016 T__point Oid = 1017 T__lseg Oid = 1018 T__path Oid = 1019 T__box Oid = 1020 T__float4 Oid = 1021 T__float8 Oid = 1022 T__abstime Oid = 1023 T__reltime Oid = 1024 T__tinterval Oid = 1025 T__polygon Oid = 1027 T__oid Oid = 1028 T_aclitem Oid = 1033 T__aclitem Oid = 1034 T__macaddr Oid = 1040 T__inet Oid = 1041 T_bpchar Oid = 1042 T_varchar Oid = 1043 T_date Oid = 1082 T_time Oid = 1083 T_timestamp Oid = 1114 T__timestamp Oid = 1115 T__date Oid = 1182 T__time Oid = 1183 T_timestamptz Oid = 1184 T__timestamptz Oid = 1185 T_interval Oid = 1186 T__interval Oid = 1187 T__numeric Oid = 1231 T_pg_database Oid = 1248 T__cstring Oid = 1263 T_timetz Oid = 1266 T__timetz Oid = 1270 T_bit Oid = 1560 T__bit Oid = 1561 T_varbit Oid = 1562 T__varbit Oid = 1563 T_numeric Oid = 1700 T_refcursor Oid = 1790 T__refcursor Oid = 2201 T_regprocedure Oid = 2202 T_regoper Oid = 2203 T_regoperator Oid = 2204 T_regclass Oid = 2205 T_regtype Oid = 2206 T__regprocedure Oid = 2207 T__regoper Oid = 2208 T__regoperator Oid = 2209 T__regclass Oid = 2210 T__regtype Oid = 2211 T_record Oid = 2249 T_cstring Oid = 2275 T_any Oid = 2276 T_anyarray Oid = 2277 T_void Oid = 2278 T_trigger Oid = 2279 T_language_handler Oid = 2280 T_internal Oid = 2281 T_opaque Oid = 2282 T_anyelement Oid = 2283 T__record Oid = 2287 T_anynonarray Oid = 2776 T_pg_authid Oid = 2842 T_pg_auth_members Oid = 2843 T__txid_snapshot Oid = 2949 T_uuid Oid = 2950 T__uuid Oid = 2951 T_txid_snapshot Oid = 2970 T_fdw_handler Oid = 3115 T_pg_lsn Oid = 3220 T__pg_lsn Oid = 3221 T_tsm_handler Oid = 3310 T_anyenum Oid = 3500 T_tsvector Oid = 3614 T_tsquery Oid = 3615 T_gtsvector Oid = 3642 T__tsvector Oid = 3643 T__gtsvector Oid = 3644 T__tsquery Oid = 3645 T_regconfig Oid = 3734 T__regconfig Oid = 3735 T_regdictionary Oid = 3769 T__regdictionary 
Oid = 3770 T_jsonb Oid = 3802 T__jsonb Oid = 3807 T_anyrange Oid = 3831 T_event_trigger Oid = 3838 T_int4range Oid = 3904 T__int4range Oid = 3905 T_numrange Oid = 3906 T__numrange Oid = 3907 T_tsrange Oid = 3908 T__tsrange Oid = 3909 T_tstzrange Oid = 3910 T__tstzrange Oid = 3911 T_daterange Oid = 3912 T__daterange Oid = 3913 T_int8range Oid = 3926 T__int8range Oid = 3927 T_pg_shseclabel Oid = 4066 T_regnamespace Oid = 4089 T__regnamespace Oid = 4090 T_regrole Oid = 4096 T__regrole Oid = 4097 ) var TypeName = map[Oid]string{ T_bool: "BOOL", T_bytea: "BYTEA", T_char: "CHAR", T_name: "NAME", T_int8: "INT8", T_int2: "INT2", T_int2vector: "INT2VECTOR", T_int4: "INT4", T_regproc: "REGPROC", T_text: "TEXT", T_oid: "OID", T_tid: "TID", T_xid: "XID", T_cid: "CID", T_oidvector: "OIDVECTOR", T_pg_ddl_command: "PG_DDL_COMMAND", T_pg_type: "PG_TYPE", T_pg_attribute: "PG_ATTRIBUTE", T_pg_proc: "PG_PROC", T_pg_class: "PG_CLASS", T_json: "JSON", T_xml: "XML", T__xml: "_XML", T_pg_node_tree: "PG_NODE_TREE", T__json: "_JSON", T_smgr: "SMGR", T_index_am_handler: "INDEX_AM_HANDLER", T_point: "POINT", T_lseg: "LSEG", T_path: "PATH", T_box: "BOX", T_polygon: "POLYGON", T_line: "LINE", T__line: "_LINE", T_cidr: "CIDR", T__cidr: "_CIDR", T_float4: "FLOAT4", T_float8: "FLOAT8", T_abstime: "ABSTIME", T_reltime: "RELTIME", T_tinterval: "TINTERVAL", T_unknown: "UNKNOWN", T_circle: "CIRCLE", T__circle: "_CIRCLE", T_money: "MONEY", T__money: "_MONEY", T_macaddr: "MACADDR", T_inet: "INET", T__bool: "_BOOL", T__bytea: "_BYTEA", T__char: "_CHAR", T__name: "_NAME", T__int2: "_INT2", T__int2vector: "_INT2VECTOR", T__int4: "_INT4", T__regproc: "_REGPROC", T__text: "_TEXT", T__tid: "_TID", T__xid: "_XID", T__cid: "_CID", T__oidvector: "_OIDVECTOR", T__bpchar: "_BPCHAR", T__varchar: "_VARCHAR", T__int8: "_INT8", T__point: "_POINT", T__lseg: "_LSEG", T__path: "_PATH", T__box: "_BOX", T__float4: "_FLOAT4", T__float8: "_FLOAT8", T__abstime: "_ABSTIME", T__reltime: "_RELTIME", T__tinterval: "_TINTERVAL", T__polygon: "_POLYGON", T__oid: "_OID", T_aclitem: "ACLITEM", T__aclitem: "_ACLITEM", T__macaddr: "_MACADDR", T__inet: "_INET", T_bpchar: "BPCHAR", T_varchar: "VARCHAR", T_date: "DATE", T_time: "TIME", T_timestamp: "TIMESTAMP", T__timestamp: "_TIMESTAMP", T__date: "_DATE", T__time: "_TIME", T_timestamptz: "TIMESTAMPTZ", T__timestamptz: "_TIMESTAMPTZ", T_interval: "INTERVAL", T__interval: "_INTERVAL", T__numeric: "_NUMERIC", T_pg_database: "PG_DATABASE", T__cstring: "_CSTRING", T_timetz: "TIMETZ", T__timetz: "_TIMETZ", T_bit: "BIT", T__bit: "_BIT", T_varbit: "VARBIT", T__varbit: "_VARBIT", T_numeric: "NUMERIC", T_refcursor: "REFCURSOR", T__refcursor: "_REFCURSOR", T_regprocedure: "REGPROCEDURE", T_regoper: "REGOPER", T_regoperator: "REGOPERATOR", T_regclass: "REGCLASS", T_regtype: "REGTYPE", T__regprocedure: "_REGPROCEDURE", T__regoper: "_REGOPER", T__regoperator: "_REGOPERATOR", T__regclass: "_REGCLASS", T__regtype: "_REGTYPE", T_record: "RECORD", T_cstring: "CSTRING", T_any: "ANY", T_anyarray: "ANYARRAY", T_void: "VOID", T_trigger: "TRIGGER", T_language_handler: "LANGUAGE_HANDLER", T_internal: "INTERNAL", T_opaque: "OPAQUE", T_anyelement: "ANYELEMENT", T__record: "_RECORD", T_anynonarray: "ANYNONARRAY", T_pg_authid: "PG_AUTHID", T_pg_auth_members: "PG_AUTH_MEMBERS", T__txid_snapshot: "_TXID_SNAPSHOT", T_uuid: "UUID", T__uuid: "_UUID", T_txid_snapshot: "TXID_SNAPSHOT", T_fdw_handler: "FDW_HANDLER", T_pg_lsn: "PG_LSN", T__pg_lsn: "_PG_LSN", T_tsm_handler: "TSM_HANDLER", T_anyenum: "ANYENUM", T_tsvector: "TSVECTOR", T_tsquery: 
"TSQUERY", T_gtsvector: "GTSVECTOR", T__tsvector: "_TSVECTOR", T__gtsvector: "_GTSVECTOR", T__tsquery: "_TSQUERY", T_regconfig: "REGCONFIG", T__regconfig: "_REGCONFIG", T_regdictionary: "REGDICTIONARY", T__regdictionary: "_REGDICTIONARY", T_jsonb: "JSONB", T__jsonb: "_JSONB", T_anyrange: "ANYRANGE", T_event_trigger: "EVENT_TRIGGER", T_int4range: "INT4RANGE", T__int4range: "_INT4RANGE", T_numrange: "NUMRANGE", T__numrange: "_NUMRANGE", T_tsrange: "TSRANGE", T__tsrange: "_TSRANGE", T_tstzrange: "TSTZRANGE", T__tstzrange: "_TSTZRANGE", T_daterange: "DATERANGE", T__daterange: "_DATERANGE", T_int8range: "INT8RANGE", T__int8range: "_INT8RANGE", T_pg_shseclabel: "PG_SHSECLABEL", T_regnamespace: "REGNAMESPACE", T__regnamespace: "_REGNAMESPACE", T_regrole: "REGROLE", T__regrole: "_REGROLE", } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/doc.go0000644000000000000000000002261715024302467021033 0ustar rootroot/* Package pq is a pure Go Postgres driver for the database/sql package. In most cases clients will use the database/sql package instead of using this package directly. For example: import ( "database/sql" _ "github.com/lib/pq" ) func main() { connStr := "user=pqgotest dbname=pqgotest sslmode=verify-full" db, err := sql.Open("postgres", connStr) if err != nil { log.Fatal(err) } age := 21 rows, err := db.Query("SELECT name FROM users WHERE age = $1", age) … } You can also connect to a database using a URL. For example: connStr := "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full" db, err := sql.Open("postgres", connStr) Connection String Parameters Similarly to libpq, when establishing a connection using pq you are expected to supply a connection string containing zero or more parameters. A subset of the connection parameters supported by libpq are also supported by pq. Additionally, pq also lets you specify run-time parameters (such as search_path or work_mem) directly in the connection string. This is different from libpq, which does not allow run-time parameters in the connection string, instead requiring you to supply them in the options parameter. For compatibility with libpq, the following special connection parameters are supported: * dbname - The name of the database to connect to * user - The user to sign in as * password - The user's password * host - The host to connect to. Values that start with / are for unix domain sockets. (default is localhost) * port - The port to bind to. (default is 5432) * sslmode - Whether or not to use SSL (default is require, this is not the default for libpq) * fallback_application_name - An application_name to fall back to if one isn't provided. * connect_timeout - Maximum wait for connection, in seconds. Zero or not specified means wait indefinitely. * sslcert - Cert file location. The file must contain PEM encoded data. * sslkey - Key file location. The file must contain PEM encoded data. * sslrootcert - The location of the root certificate file. The file must contain PEM encoded data. Valid values for sslmode are: * disable - No SSL * require - Always SSL (skip verification) * verify-ca - Always SSL (verify that the certificate presented by the server was signed by a trusted CA) * verify-full - Always SSL (verify that the certification presented by the server was signed by a trusted CA and the server host name matches the one in the certificate) See http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING for more information about connection string parameters. 
Use single quotes for values that contain whitespace: "user=pqgotest password='with spaces'" A backslash will escape the next character in values: "user=space\ man password='it\'s valid'" Note that the connection parameter client_encoding (which sets the text encoding for the connection) may be set but must be "UTF8", matching with the same rules as Postgres. It is an error to provide any other value. In addition to the parameters listed above, any run-time parameter that can be set at backend start time can be set in the connection string. For more information, see http://www.postgresql.org/docs/current/static/runtime-config.html. Most environment variables as specified at http://www.postgresql.org/docs/current/static/libpq-envars.html supported by libpq are also supported by pq. If any of the environment variables not supported by pq are set, pq will panic during connection establishment. Environment variables have a lower precedence than explicitly provided connection parameters. The pgpass mechanism as described in http://www.postgresql.org/docs/current/static/libpq-pgpass.html is supported, but on Windows PGPASSFILE must be specified explicitly. Queries database/sql does not dictate any specific format for parameter markers in query strings, and pq uses the Postgres-native ordinal markers, as shown above. The same marker can be reused for the same parameter: rows, err := db.Query(`SELECT name FROM users WHERE favorite_fruit = $1 OR age BETWEEN $2 AND $2 + 3`, "orange", 64) pq does not support the LastInsertId() method of the Result type in database/sql. To return the identifier of an INSERT (or UPDATE or DELETE), use the Postgres RETURNING clause with a standard Query or QueryRow call: var userid int err := db.QueryRow(`INSERT INTO users(name, favorite_fruit, age) VALUES('beatrice', 'starfruit', 93) RETURNING id`).Scan(&userid) For more details on RETURNING, see the Postgres documentation: http://www.postgresql.org/docs/current/static/sql-insert.html http://www.postgresql.org/docs/current/static/sql-update.html http://www.postgresql.org/docs/current/static/sql-delete.html For additional instructions on querying see the documentation for the database/sql package. Data Types Parameters pass through driver.DefaultParameterConverter before they are handled by this package. When the binary_parameters connection option is enabled, []byte values are sent directly to the backend as data in binary format. This package returns the following types for values from the PostgreSQL backend: - integer types smallint, integer, and bigint are returned as int64 - floating-point types real and double precision are returned as float64 - character types char, varchar, and text are returned as string - temporal types date, time, timetz, timestamp, and timestamptz are returned as time.Time - the boolean type is returned as bool - the bytea type is returned as []byte All other types are returned directly from the backend as []byte values in text format. Errors pq may return errors of type *pq.Error which can be interrogated for error details: if err, ok := err.(*pq.Error); ok { fmt.Println("pq error:", err.Code.Name()) } See the pq.Error type for details. Bulk imports You can perform bulk imports by preparing a statement returned by pq.CopyIn (or pq.CopyInSchema) in an explicit transaction (sql.Tx). The returned statement handle can then be repeatedly "executed" to copy data into the target table. After all data has been processed you should call Exec() once with no arguments to flush all buffered data. 
Any call to Exec() might return an error which should be handled appropriately, but because of the internal buffering an error returned by Exec() might not be related to the data passed in the call that failed. CopyIn uses COPY FROM internally. It is not possible to COPY outside of an explicit transaction in pq. Usage example: txn, err := db.Begin() if err != nil { log.Fatal(err) } stmt, err := txn.Prepare(pq.CopyIn("users", "name", "age")) if err != nil { log.Fatal(err) } for _, user := range users { _, err = stmt.Exec(user.Name, int64(user.Age)) if err != nil { log.Fatal(err) } } _, err = stmt.Exec() if err != nil { log.Fatal(err) } err = stmt.Close() if err != nil { log.Fatal(err) } err = txn.Commit() if err != nil { log.Fatal(err) } Notifications PostgreSQL supports a simple publish/subscribe model over database connections. See http://www.postgresql.org/docs/current/static/sql-notify.html for more information about the general mechanism. To start listening for notifications, you first have to open a new connection to the database by calling NewListener. This connection can not be used for anything other than LISTEN / NOTIFY. Calling Listen will open a "notification channel"; once a notification channel is open, a notification generated on that channel will effect a send on the Listener.Notify channel. A notification channel will remain open until Unlisten is called, though connection loss might result in some notifications being lost. To solve this problem, Listener sends a nil pointer over the Notify channel any time the connection is re-established following a connection loss. The application can get information about the state of the underlying connection by setting an event callback in the call to NewListener. A single Listener can safely be used from concurrent goroutines, which means that there is often no need to create more than one Listener in your application. However, a Listener is always connected to a single database, so you will need to create a new Listener instance for every database you want to receive notifications in. The channel name in both Listen and Unlisten is case sensitive, and can contain any characters legal in an identifier (see http://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS for more information). Note that the channel name will be truncated to 63 bytes by the PostgreSQL server. You can find a complete, working example of Listener usage at https://godoc.org/github.com/lib/pq/example/listen. Kerberos Support If you need support for Kerberos authentication, add the following to your main package: import "github.com/lib/pq/auth/kerberos" func init() { pq.RegisterGSSProvider(func() (pq.Gss, error) { return kerberos.NewGSS() }) } This package is in a separate module so that users who don't need Kerberos don't have to download unnecessary dependencies. When imported, additional connection string parameters are supported: * krbsrvname - GSS (Kerberos) service name when constructing the SPN (default is `postgres`). This will be combined with the host to form the full SPN: `krbsrvname/host`. * krbspn - GSS (Kerberos) SPN. This takes priority over `krbsrvname` if present. 
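To complement the Notifications section above, the following minimal sketch shows a Listener loop; the connection string, channel name, and reconnect intervals are illustrative placeholders, and imports of time, log, and fmt are assumed as in the earlier examples:

	listener := pq.NewListener(connStr, 10*time.Second, time.Minute,
		func(ev pq.ListenerEventType, err error) {
			if err != nil {
				log.Println("listener event:", err)
			}
		})
	if err := listener.Listen("jobs"); err != nil {
		log.Fatal(err)
	}
	for n := range listener.Notify {
		if n == nil {
			// A nil notification is sent after the connection is re-established.
			continue
		}
		fmt.Println("notification on", n.Channel, "payload:", n.Extra)
	}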
*/ package pq dependencies/pkg/mod/github.com/lib/pq@v1.10.9/buf_test.go0000644000000000000000000000033515024302467022072 0ustar rootrootpackage pq import "testing" func Benchmark_writeBuf_string(b *testing.B) { var buf writeBuf const s = "foo" b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { buf.string(s) buf.buf = buf.buf[:0] } } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/notice_example_test.go0000644000000000000000000000127215024302467024313 0ustar rootroot//go:build go1.10 // +build go1.10 package pq_test import ( "database/sql" "fmt" "log" "github.com/lib/pq" ) func ExampleConnectorWithNoticeHandler() { name := "" // Base connector to wrap base, err := pq.NewConnector(name) if err != nil { log.Fatal(err) } // Wrap the connector to simply print out the message connector := pq.ConnectorWithNoticeHandler(base, func(notice *pq.Error) { fmt.Println("Notice sent: " + notice.Message) }) db := sql.OpenDB(connector) defer db.Close() // Raise a notice sql := "DO language plpgsql $$ BEGIN RAISE NOTICE 'test notice'; END $$" if _, err := db.Exec(sql); err != nil { log.Fatal(err) } // Output: // Notice sent: test notice } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/user_other.go0000644000000000000000000000035215024302467022435 0ustar rootroot// Package pq is a pure Go Postgres driver for the database/sql package. //go:build js || android || hurd || zos // +build js android hurd zos package pq func userCurrent() (string, error) { return "", ErrCouldNotDetectUsername } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/README.md0000644000000000000000000000234615024302467021213 0ustar rootroot# pq - A pure Go postgres driver for Go's database/sql package [![GoDoc](https://godoc.org/github.com/lib/pq?status.svg)](https://pkg.go.dev/github.com/lib/pq?tab=doc) ## Install go get github.com/lib/pq ## Features * SSL * Handles bad connections for `database/sql` * Scan `time.Time` correctly (i.e. `timestamp[tz]`, `time[tz]`, `date`) * Scan binary blobs correctly (i.e. `bytea`) * Package for `hstore` support * COPY FROM support * pq.ParseURL for converting urls to connection strings for sql.Open. * Many libpq compatible environment variables * Unix socket support * Notifications: `LISTEN`/`NOTIFY` * pgpass support * GSS (Kerberos) auth ## Tests `go test` is used for testing. See [TESTS.md](TESTS.md) for more details. ## Status This package is currently in maintenance mode, which means: 1. It generally does not accept new features. 2. It does accept bug fixes and version compatability changes provided by the community. 3. Maintainers usually do not resolve reported issues. 4. Community members are encouraged to help each other with reported issues. For users that require new features or reliable resolution of reported bugs, we recommend using [pgx](https://github.com/jackc/pgx) which is under active development. 
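## Minimal example

A minimal usage sketch (the connection parameters are placeholders; see the [package documentation](https://pkg.go.dev/github.com/lib/pq) for the full set of options):

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // registers the "postgres" driver
)

func main() {
	db, err := sql.Open("postgres", "user=pqgotest dbname=pqgotest sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var version string
	if err := db.QueryRow("SELECT version()").Scan(&version); err != nil {
		log.Fatal(err)
	}
	log.Println(version)
}
```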
dependencies/pkg/mod/github.com/lib/pq@v1.10.9/bench_test.go0000644000000000000000000002411415024302467022376 0ustar rootrootpackage pq import ( "bufio" "bytes" "context" "database/sql" "database/sql/driver" "io" "math/rand" "net" "runtime" "strconv" "strings" "sync" "testing" "time" "github.com/lib/pq/oid" ) var ( selectStringQuery = "SELECT '" + strings.Repeat("0123456789", 10) + "'" selectSeriesQuery = "SELECT generate_series(1, 100)" ) func BenchmarkSelectString(b *testing.B) { var result string benchQuery(b, selectStringQuery, &result) } func BenchmarkSelectSeries(b *testing.B) { var result int benchQuery(b, selectSeriesQuery, &result) } func benchQuery(b *testing.B, query string, result interface{}) { b.StopTimer() db := openTestConn(b) defer db.Close() b.StartTimer() for i := 0; i < b.N; i++ { benchQueryLoop(b, db, query, result) } } func benchQueryLoop(b *testing.B, db *sql.DB, query string, result interface{}) { rows, err := db.Query(query) if err != nil { b.Fatal(err) } defer rows.Close() for rows.Next() { err = rows.Scan(result) if err != nil { b.Fatal("failed to scan", err) } } } // reading from circularConn yields content[:prefixLen] once, followed by // content[prefixLen:] over and over again. It never returns EOF. type circularConn struct { content string prefixLen int pos int net.Conn // for all other net.Conn methods that will never be called } func (r *circularConn) Read(b []byte) (n int, err error) { n = copy(b, r.content[r.pos:]) r.pos += n if r.pos >= len(r.content) { r.pos = r.prefixLen } return } func (r *circularConn) Write(b []byte) (n int, err error) { return len(b), nil } func (r *circularConn) Close() error { return nil } func fakeConn(content string, prefixLen int) *conn { c := &circularConn{content: content, prefixLen: prefixLen} return &conn{buf: bufio.NewReader(c), c: c} } // This benchmark is meant to be the same as BenchmarkSelectString, but takes // out some of the factors this package can't control. The numbers are less noisy, // but also the costs of network communication aren't accurately represented. 
func BenchmarkMockSelectString(b *testing.B) { b.StopTimer() // taken from a recorded run of BenchmarkSelectString // See: http://www.postgresql.org/docs/current/static/protocol-message-formats.html const response = "1\x00\x00\x00\x04" + "t\x00\x00\x00\x06\x00\x00" + "T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" + "Z\x00\x00\x00\x05I" + "2\x00\x00\x00\x04" + "D\x00\x00\x00n\x00\x01\x00\x00\x00d0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" + "C\x00\x00\x00\rSELECT 1\x00" + "Z\x00\x00\x00\x05I" + "3\x00\x00\x00\x04" + "Z\x00\x00\x00\x05I" c := fakeConn(response, 0) b.StartTimer() for i := 0; i < b.N; i++ { benchMockQuery(b, c, selectStringQuery) } } var seriesRowData = func() string { var buf bytes.Buffer for i := 1; i <= 100; i++ { digits := byte(2) if i >= 100 { digits = 3 } else if i < 10 { digits = 1 } buf.WriteString("D\x00\x00\x00") buf.WriteByte(10 + digits) buf.WriteString("\x00\x01\x00\x00\x00") buf.WriteByte(digits) buf.WriteString(strconv.Itoa(i)) } return buf.String() }() func BenchmarkMockSelectSeries(b *testing.B) { b.StopTimer() var response = "1\x00\x00\x00\x04" + "t\x00\x00\x00\x06\x00\x00" + "T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" + "Z\x00\x00\x00\x05I" + "2\x00\x00\x00\x04" + seriesRowData + "C\x00\x00\x00\x0fSELECT 100\x00" + "Z\x00\x00\x00\x05I" + "3\x00\x00\x00\x04" + "Z\x00\x00\x00\x05I" c := fakeConn(response, 0) b.StartTimer() for i := 0; i < b.N; i++ { benchMockQuery(b, c, selectSeriesQuery) } } func benchMockQuery(b *testing.B, c *conn, query string) { stmt, err := c.Prepare(query) if err != nil { b.Fatal(err) } defer stmt.Close() rows, err := stmt.(driver.StmtQueryContext).QueryContext(context.Background(), nil) if err != nil { b.Fatal(err) } defer rows.Close() var dest [1]driver.Value for { if err := rows.Next(dest[:]); err != nil { if err == io.EOF { break } b.Fatal(err) } } } func BenchmarkPreparedSelectString(b *testing.B) { var result string benchPreparedQuery(b, selectStringQuery, &result) } func BenchmarkPreparedSelectSeries(b *testing.B) { var result int benchPreparedQuery(b, selectSeriesQuery, &result) } func benchPreparedQuery(b *testing.B, query string, result interface{}) { b.StopTimer() db := openTestConn(b) defer db.Close() stmt, err := db.Prepare(query) if err != nil { b.Fatal(err) } defer stmt.Close() b.StartTimer() for i := 0; i < b.N; i++ { benchPreparedQueryLoop(b, db, stmt, result) } } func benchPreparedQueryLoop(b *testing.B, db *sql.DB, stmt *sql.Stmt, result interface{}) { rows, err := stmt.Query() if err != nil { b.Fatal(err) } if !rows.Next() { rows.Close() b.Fatal("no rows") } defer rows.Close() for rows.Next() { err = rows.Scan(&result) if err != nil { b.Fatal("failed to scan") } } } // See the comment for BenchmarkMockSelectString. 
func BenchmarkMockPreparedSelectString(b *testing.B) { b.StopTimer() const parseResponse = "1\x00\x00\x00\x04" + "t\x00\x00\x00\x06\x00\x00" + "T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" + "Z\x00\x00\x00\x05I" const responses = parseResponse + "2\x00\x00\x00\x04" + "D\x00\x00\x00n\x00\x01\x00\x00\x00d0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" + "C\x00\x00\x00\rSELECT 1\x00" + "Z\x00\x00\x00\x05I" c := fakeConn(responses, len(parseResponse)) stmt, err := c.Prepare(selectStringQuery) if err != nil { b.Fatal(err) } b.StartTimer() for i := 0; i < b.N; i++ { benchPreparedMockQuery(b, c, stmt) } } func BenchmarkMockPreparedSelectSeries(b *testing.B) { b.StopTimer() const parseResponse = "1\x00\x00\x00\x04" + "t\x00\x00\x00\x06\x00\x00" + "T\x00\x00\x00!\x00\x01?column?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xc1\xff\xfe\xff\xff\xff\xff\x00\x00" + "Z\x00\x00\x00\x05I" var responses = parseResponse + "2\x00\x00\x00\x04" + seriesRowData + "C\x00\x00\x00\x0fSELECT 100\x00" + "Z\x00\x00\x00\x05I" c := fakeConn(responses, len(parseResponse)) stmt, err := c.Prepare(selectSeriesQuery) if err != nil { b.Fatal(err) } b.StartTimer() for i := 0; i < b.N; i++ { benchPreparedMockQuery(b, c, stmt) } } func benchPreparedMockQuery(b *testing.B, c *conn, stmt driver.Stmt) { rows, err := stmt.(driver.StmtQueryContext).QueryContext(context.Background(), nil) if err != nil { b.Fatal(err) } defer rows.Close() var dest [1]driver.Value for { if err := rows.Next(dest[:]); err != nil { if err == io.EOF { break } b.Fatal(err) } } } func BenchmarkEncodeInt64(b *testing.B) { for i := 0; i < b.N; i++ { encode(¶meterStatus{}, int64(1234), oid.T_int8) } } func BenchmarkEncodeFloat64(b *testing.B) { for i := 0; i < b.N; i++ { encode(¶meterStatus{}, 3.14159, oid.T_float8) } } var testByteString = []byte("abcdefghijklmnopqrstuvwxyz") func BenchmarkEncodeByteaHex(b *testing.B) { for i := 0; i < b.N; i++ { encode(¶meterStatus{serverVersion: 90000}, testByteString, oid.T_bytea) } } func BenchmarkEncodeByteaEscape(b *testing.B) { for i := 0; i < b.N; i++ { encode(¶meterStatus{serverVersion: 84000}, testByteString, oid.T_bytea) } } func BenchmarkEncodeBool(b *testing.B) { for i := 0; i < b.N; i++ { encode(¶meterStatus{}, true, oid.T_bool) } } var testTimestamptz = time.Date(2001, time.January, 1, 0, 0, 0, 0, time.Local) func BenchmarkEncodeTimestamptz(b *testing.B) { for i := 0; i < b.N; i++ { encode(¶meterStatus{}, testTimestamptz, oid.T_timestamptz) } } var testIntBytes = []byte("1234") func BenchmarkDecodeInt64(b *testing.B) { for i := 0; i < b.N; i++ { decode(¶meterStatus{}, testIntBytes, oid.T_int8, formatText) } } var testFloatBytes = []byte("3.14159") func BenchmarkDecodeFloat64(b *testing.B) { for i := 0; i < b.N; i++ { decode(¶meterStatus{}, testFloatBytes, oid.T_float8, formatText) } } var testBoolBytes = []byte{'t'} func BenchmarkDecodeBool(b *testing.B) { for i := 0; i < b.N; i++ { decode(¶meterStatus{}, testBoolBytes, oid.T_bool, formatText) } } func TestDecodeBool(t *testing.T) { db := openTestConn(t) rows, err := db.Query("select true") if err != nil { t.Fatal(err) } rows.Close() } var testTimestamptzBytes = []byte("2013-09-17 22:15:32.360754-07") func BenchmarkDecodeTimestamptz(b *testing.B) { for i := 0; i < b.N; i++ { decode(¶meterStatus{}, testTimestamptzBytes, oid.T_timestamptz, formatText) } } func BenchmarkDecodeTimestamptzMultiThread(b *testing.B) { oldProcs := runtime.GOMAXPROCS(0) defer 
runtime.GOMAXPROCS(oldProcs) runtime.GOMAXPROCS(runtime.NumCPU()) globalLocationCache = newLocationCache() f := func(wg *sync.WaitGroup, loops int) { defer wg.Done() for i := 0; i < loops; i++ { decode(¶meterStatus{}, testTimestamptzBytes, oid.T_timestamptz, formatText) } } wg := &sync.WaitGroup{} b.ResetTimer() for j := 0; j < 10; j++ { wg.Add(1) go f(wg, b.N/10) } wg.Wait() } func BenchmarkLocationCache(b *testing.B) { globalLocationCache = newLocationCache() for i := 0; i < b.N; i++ { globalLocationCache.getLocation(rand.Intn(10000)) } } func BenchmarkLocationCacheMultiThread(b *testing.B) { oldProcs := runtime.GOMAXPROCS(0) defer runtime.GOMAXPROCS(oldProcs) runtime.GOMAXPROCS(runtime.NumCPU()) globalLocationCache = newLocationCache() f := func(wg *sync.WaitGroup, loops int) { defer wg.Done() for i := 0; i < loops; i++ { globalLocationCache.getLocation(rand.Intn(10000)) } } wg := &sync.WaitGroup{} b.ResetTimer() for j := 0; j < 10; j++ { wg.Add(1) go f(wg, b.N/10) } wg.Wait() } // Stress test the performance of parsing results from the wire. func BenchmarkResultParsing(b *testing.B) { b.StopTimer() db := openTestConn(b) defer db.Close() _, err := db.Exec("BEGIN") if err != nil { b.Fatal(err) } b.StartTimer() for i := 0; i < b.N; i++ { res, err := db.Query("SELECT generate_series(1, 50000)") if err != nil { b.Fatal(err) } res.Close() } } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/conn_test.go0000644000000000000000000012746415024302467022270 0ustar rootrootpackage pq import ( "context" "database/sql" "database/sql/driver" "errors" "fmt" "io" "net" "os" "reflect" "strings" "testing" "time" ) type Fatalistic interface { Fatal(args ...interface{}) } func forceBinaryParameters() bool { bp := os.Getenv("PQTEST_BINARY_PARAMETERS") if bp == "yes" { return true } else if bp == "" || bp == "no" { return false } else { panic("unexpected value for PQTEST_BINARY_PARAMETERS") } } func testConninfo(conninfo string) string { defaultTo := func(envvar string, value string) { if os.Getenv(envvar) == "" { os.Setenv(envvar, value) } } defaultTo("PGDATABASE", "pqgotest") defaultTo("PGSSLMODE", "disable") defaultTo("PGCONNECT_TIMEOUT", "20") if forceBinaryParameters() && !strings.HasPrefix(conninfo, "postgres://") && !strings.HasPrefix(conninfo, "postgresql://") { conninfo += " binary_parameters=yes" } return conninfo } func openTestConnConninfo(conninfo string) (*sql.DB, error) { return sql.Open("postgres", testConninfo(conninfo)) } func openTestConn(t Fatalistic) *sql.DB { conn, err := openTestConnConninfo("") if err != nil { t.Fatal(err) } return conn } func getServerVersion(t *testing.T, db *sql.DB) int { var version int err := db.QueryRow("SHOW server_version_num").Scan(&version) if err != nil { t.Fatal(err) } return version } func TestReconnect(t *testing.T) { db1 := openTestConn(t) defer db1.Close() tx, err := db1.Begin() if err != nil { t.Fatal(err) } var pid1 int err = tx.QueryRow("SELECT pg_backend_pid()").Scan(&pid1) if err != nil { t.Fatal(err) } db2 := openTestConn(t) defer db2.Close() _, err = db2.Exec("SELECT pg_terminate_backend($1)", pid1) if err != nil { t.Fatal(err) } // The rollback will probably "fail" because we just killed // its connection above _ = tx.Rollback() const expected int = 42 var result int err = db1.QueryRow(fmt.Sprintf("SELECT %d", expected)).Scan(&result) if err != nil { t.Fatal(err) } if result != expected { t.Errorf("got %v; expected %v", result, expected) } } func TestCommitInFailedTransaction(t *testing.T) { db := openTestConn(t) defer db.Close() txn, err := 
db.Begin() if err != nil { t.Fatal(err) } rows, err := txn.Query("SELECT error") if err == nil { rows.Close() t.Fatal("expected failure") } err = txn.Commit() if err != ErrInFailedTransaction { t.Fatalf("expected ErrInFailedTransaction; got %#v", err) } } func TestOpenURL(t *testing.T) { testURL := func(url string) { db, err := openTestConnConninfo(url) if err != nil { t.Fatal(err) } defer db.Close() // database/sql might not call our Open at all unless we do something with // the connection txn, err := db.Begin() if err != nil { t.Fatal(err) } txn.Rollback() } testURL("postgres://") testURL("postgresql://") } const pgpassFile = "/tmp/pqgotest_pgpass" func TestPgpass(t *testing.T) { testAssert := func(conninfo string, expected string, reason string) { conn, err := openTestConnConninfo(conninfo) if err != nil { t.Fatal(err) } defer conn.Close() txn, err := conn.Begin() if err != nil { if expected != "fail" { t.Fatalf(reason, err) } return } rows, err := txn.Query("SELECT USER") if err != nil { txn.Rollback() if expected != "fail" { t.Fatalf(reason, err) } } else { rows.Close() if expected != "ok" { t.Fatalf(reason, err) } } txn.Rollback() } testAssert("", "ok", "missing .pgpass, unexpected error %#v") os.Setenv("PGPASSFILE", pgpassFile) testAssert("host=/tmp", "fail", ", unexpected error %#v") os.Remove(pgpassFile) pgpass, err := os.OpenFile(pgpassFile, os.O_RDWR|os.O_CREATE, 0644) if err != nil { t.Fatalf("Unexpected error writing pgpass file %#v", err) } _, err = pgpass.WriteString(`# comment server:5432:some_db:some_user:pass_A *:5432:some_db:some_user:pass_B localhost:*:*:*:pass_C *:*:*:*:pass_fallback `) if err != nil { t.Fatalf("Unexpected error writing pgpass file %#v", err) } pgpass.Close() assertPassword := func(extra values, expected string) { o := values{ "host": "localhost", "sslmode": "disable", "connect_timeout": "20", "user": "majid", "port": "5432", "extra_float_digits": "2", "dbname": "pqgotest", "client_encoding": "UTF8", "datestyle": "ISO, MDY", } for k, v := range extra { o[k] = v } (&conn{}).handlePgpass(o) if pw := o["password"]; pw != expected { t.Fatalf("For %v expected %s got %s", extra, expected, pw) } } // wrong permissions for the pgpass file means it should be ignored assertPassword(values{"host": "example.com", "user": "foo"}, "") // fix the permissions and check if it has taken effect os.Chmod(pgpassFile, 0600) assertPassword(values{"host": "server", "dbname": "some_db", "user": "some_user"}, "pass_A") assertPassword(values{"host": "example.com", "user": "foo"}, "pass_fallback") assertPassword(values{"host": "example.com", "dbname": "some_db", "user": "some_user"}, "pass_B") // localhost also matches the default "" and UNIX sockets assertPassword(values{"host": "", "user": "some_user"}, "pass_C") assertPassword(values{"host": "/tmp", "user": "some_user"}, "pass_C") // cleanup os.Remove(pgpassFile) os.Setenv("PGPASSFILE", "") } func TestExec(t *testing.T) { db := openTestConn(t) defer db.Close() _, err := db.Exec("CREATE TEMP TABLE temp (a int)") if err != nil { t.Fatal(err) } r, err := db.Exec("INSERT INTO temp VALUES (1)") if err != nil { t.Fatal(err) } if n, _ := r.RowsAffected(); n != 1 { t.Fatalf("expected 1 row affected, not %d", n) } r, err = db.Exec("INSERT INTO temp VALUES ($1), ($2), ($3)", 1, 2, 3) if err != nil { t.Fatal(err) } if n, _ := r.RowsAffected(); n != 3 { t.Fatalf("expected 3 rows affected, not %d", n) } // SELECT doesn't send the number of returned rows in the command tag // before 9.0 if getServerVersion(t, db) >= 90000 { r, err = 
db.Exec("SELECT g FROM generate_series(1, 2) g") if err != nil { t.Fatal(err) } if n, _ := r.RowsAffected(); n != 2 { t.Fatalf("expected 2 rows affected, not %d", n) } r, err = db.Exec("SELECT g FROM generate_series(1, $1) g", 3) if err != nil { t.Fatal(err) } if n, _ := r.RowsAffected(); n != 3 { t.Fatalf("expected 3 rows affected, not %d", n) } } } func TestStatment(t *testing.T) { db := openTestConn(t) defer db.Close() st, err := db.Prepare("SELECT 1") if err != nil { t.Fatal(err) } st1, err := db.Prepare("SELECT 2") if err != nil { t.Fatal(err) } r, err := st.Query() if err != nil { t.Fatal(err) } defer r.Close() if !r.Next() { t.Fatal("expected row") } var i int err = r.Scan(&i) if err != nil { t.Fatal(err) } if i != 1 { t.Fatalf("expected 1, got %d", i) } // st1 r1, err := st1.Query() if err != nil { t.Fatal(err) } defer r1.Close() if !r1.Next() { if r.Err() != nil { t.Fatal(r1.Err()) } t.Fatal("expected row") } err = r1.Scan(&i) if err != nil { t.Fatal(err) } if i != 2 { t.Fatalf("expected 2, got %d", i) } } func TestRowsCloseBeforeDone(t *testing.T) { db := openTestConn(t) defer db.Close() r, err := db.Query("SELECT 1") if err != nil { t.Fatal(err) } err = r.Close() if err != nil { t.Fatal(err) } if r.Next() { t.Fatal("unexpected row") } if r.Err() != nil { t.Fatal(r.Err()) } } func TestParameterCountMismatch(t *testing.T) { db := openTestConn(t) defer db.Close() var notused int err := db.QueryRow("SELECT false", 1).Scan(¬used) if err == nil { t.Fatal("expected err") } // make sure we clean up correctly err = db.QueryRow("SELECT 1").Scan(¬used) if err != nil { t.Fatal(err) } err = db.QueryRow("SELECT $1").Scan(¬used) if err == nil { t.Fatal("expected err") } // make sure we clean up correctly err = db.QueryRow("SELECT 1").Scan(¬used) if err != nil { t.Fatal(err) } } // Test that EmptyQueryResponses are handled correctly. func TestEmptyQuery(t *testing.T) { db := openTestConn(t) defer db.Close() res, err := db.Exec("") if err != nil { t.Fatal(err) } if _, err := res.RowsAffected(); err != errNoRowsAffected { t.Fatalf("expected %s, got %v", errNoRowsAffected, err) } if _, err := res.LastInsertId(); err != errNoLastInsertID { t.Fatalf("expected %s, got %v", errNoLastInsertID, err) } rows, err := db.Query("") if err != nil { t.Fatal(err) } cols, err := rows.Columns() if err != nil { t.Fatal(err) } if len(cols) != 0 { t.Fatalf("unexpected number of columns %d in response to an empty query", len(cols)) } if rows.Next() { t.Fatal("unexpected row") } if rows.Err() != nil { t.Fatal(rows.Err()) } stmt, err := db.Prepare("") if err != nil { t.Fatal(err) } res, err = stmt.Exec() if err != nil { t.Fatal(err) } if _, err := res.RowsAffected(); err != errNoRowsAffected { t.Fatalf("expected %s, got %v", errNoRowsAffected, err) } if _, err := res.LastInsertId(); err != errNoLastInsertID { t.Fatalf("expected %s, got %v", errNoLastInsertID, err) } rows, err = stmt.Query() if err != nil { t.Fatal(err) } cols, err = rows.Columns() if err != nil { t.Fatal(err) } if len(cols) != 0 { t.Fatalf("unexpected number of columns %d in response to an empty query", len(cols)) } if rows.Next() { t.Fatal("unexpected row") } if rows.Err() != nil { t.Fatal(rows.Err()) } } // Test that rows.Columns() is correct even if there are no result rows. 
func TestEmptyResultSetColumns(t *testing.T) { db := openTestConn(t) defer db.Close() rows, err := db.Query("SELECT 1 AS a, text 'bar' AS bar WHERE FALSE") if err != nil { t.Fatal(err) } cols, err := rows.Columns() if err != nil { t.Fatal(err) } if len(cols) != 2 { t.Fatalf("unexpected number of columns %d in response to an empty query", len(cols)) } if rows.Next() { t.Fatal("unexpected row") } if rows.Err() != nil { t.Fatal(rows.Err()) } if cols[0] != "a" || cols[1] != "bar" { t.Fatalf("unexpected Columns result %v", cols) } stmt, err := db.Prepare("SELECT $1::int AS a, text 'bar' AS bar WHERE FALSE") if err != nil { t.Fatal(err) } rows, err = stmt.Query(1) if err != nil { t.Fatal(err) } cols, err = rows.Columns() if err != nil { t.Fatal(err) } if len(cols) != 2 { t.Fatalf("unexpected number of columns %d in response to an empty query", len(cols)) } if rows.Next() { t.Fatal("unexpected row") } if rows.Err() != nil { t.Fatal(rows.Err()) } if cols[0] != "a" || cols[1] != "bar" { t.Fatalf("unexpected Columns result %v", cols) } } func TestEncodeDecode(t *testing.T) { db := openTestConn(t) defer db.Close() q := ` SELECT E'\\000\\001\\002'::bytea, 'foobar'::text, NULL::integer, '2000-1-1 01:02:03.04-7'::timestamptz, 0::boolean, 123, -321, 3.14::float8 WHERE E'\\000\\001\\002'::bytea = $1 AND 'foobar'::text = $2 AND $3::integer is NULL ` // AND '2000-1-1 12:00:00.000000-7'::timestamp = $3 exp1 := []byte{0, 1, 2} exp2 := "foobar" r, err := db.Query(q, exp1, exp2, nil) if err != nil { t.Fatal(err) } defer r.Close() if !r.Next() { if r.Err() != nil { t.Fatal(r.Err()) } t.Fatal("expected row") } var got1 []byte var got2 string var got3 = sql.NullInt64{Valid: true} var got4 time.Time var got5, got6, got7, got8 interface{} err = r.Scan(&got1, &got2, &got3, &got4, &got5, &got6, &got7, &got8) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(exp1, got1) { t.Errorf("expected %q byte: %q", exp1, got1) } if !reflect.DeepEqual(exp2, got2) { t.Errorf("expected %q byte: %q", exp2, got2) } if got3.Valid { t.Fatal("expected invalid") } if got4.Year() != 2000 { t.Fatal("wrong year") } if got5 != false { t.Fatalf("expected false, got %q", got5) } if got6 != int64(123) { t.Fatalf("expected 123, got %d", got6) } if got7 != int64(-321) { t.Fatalf("expected -321, got %d", got7) } if got8 != float64(3.14) { t.Fatalf("expected 3.14, got %f", got8) } } func TestNoData(t *testing.T) { db := openTestConn(t) defer db.Close() st, err := db.Prepare("SELECT 1 WHERE true = false") if err != nil { t.Fatal(err) } defer st.Close() r, err := st.Query() if err != nil { t.Fatal(err) } defer r.Close() if r.Next() { if r.Err() != nil { t.Fatal(r.Err()) } t.Fatal("unexpected row") } _, err = db.Query("SELECT * FROM nonexistenttable WHERE age=$1", 20) if err == nil { t.Fatal("Should have raised an error on non existent table") } _, err = db.Query("SELECT * FROM nonexistenttable") if err == nil { t.Fatal("Should have raised an error on non existent table") } } func TestErrorDuringStartup(t *testing.T) { // Don't use the normal connection setup, this is intended to // blow up in the startup packet from a non-existent user. 
db, err := openTestConnConninfo("user=thisuserreallydoesntexist") if err != nil { t.Fatal(err) } defer db.Close() _, err = db.Begin() if err == nil { t.Fatal("expected error") } e, ok := err.(*Error) if !ok { t.Fatalf("expected Error, got %#v", err) } else if e.Code.Name() != "invalid_authorization_specification" && e.Code.Name() != "invalid_password" { t.Fatalf("expected invalid_authorization_specification or invalid_password, got %s (%+v)", e.Code.Name(), err) } } type testConn struct { closed bool net.Conn } func (c *testConn) Close() error { c.closed = true return c.Conn.Close() } type testDialer struct { conns []*testConn } func (d *testDialer) Dial(ntw, addr string) (net.Conn, error) { c, err := net.Dial(ntw, addr) if err != nil { return nil, err } tc := &testConn{Conn: c} d.conns = append(d.conns, tc) return tc, nil } func (d *testDialer) DialTimeout(ntw, addr string, timeout time.Duration) (net.Conn, error) { c, err := net.DialTimeout(ntw, addr, timeout) if err != nil { return nil, err } tc := &testConn{Conn: c} d.conns = append(d.conns, tc) return tc, nil } func TestErrorDuringStartupClosesConn(t *testing.T) { // Don't use the normal connection setup, this is intended to // blow up in the startup packet from a non-existent user. var d testDialer c, err := DialOpen(&d, testConninfo("user=thisuserreallydoesntexist")) if err == nil { c.Close() t.Fatal("expected dial error") } if len(d.conns) != 1 { t.Fatalf("got len(d.conns) = %d, want = %d", len(d.conns), 1) } if !d.conns[0].closed { t.Error("connection leaked") } } func TestBadConn(t *testing.T) { var err error cn := conn{} func() { defer cn.errRecover(&err) panic(io.EOF) }() if err != driver.ErrBadConn { t.Fatalf("expected driver.ErrBadConn, got: %#v", err) } if err := cn.err.get(); err != driver.ErrBadConn { t.Fatalf("expected driver.ErrBadConn, got %#v", err) } cn = conn{} func() { defer cn.errRecover(&err) e := &Error{Severity: Efatal} panic(e) }() if err != driver.ErrBadConn { t.Fatalf("expected driver.ErrBadConn, got: %#v", err) } if err := cn.err.get(); err != driver.ErrBadConn { t.Fatalf("expected driver.ErrBadConn, got %#v", err) } } // TestCloseBadConn tests that the underlying connection can be closed with // Close after an error. func TestCloseBadConn(t *testing.T) { host := os.Getenv("PGHOST") if host == "" { host = "localhost" } port := os.Getenv("PGPORT") if port == "" { port = "5432" } nc, err := net.Dial("tcp", host+":"+port) if err != nil { t.Fatal(err) } cn := conn{c: nc} func() { defer cn.errRecover(&err) panic(io.EOF) }() // Verify we can write before closing. if _, err := nc.Write(nil); err != nil { t.Fatal(err) } // First close should close the connection. if err := cn.Close(); err != nil { t.Fatal(err) } // During the Go 1.9 cycle, https://github.com/golang/go/commit/3792db5 // changed this error from // // net.errClosing = errors.New("use of closed network connection") // // to // // internal/poll.ErrClosing = errors.New("use of closed file or network connection") const errClosing = "use of closed" // Verify write after closing fails. if _, err := nc.Write(nil); err == nil { t.Fatal("expected error") } else if !strings.Contains(err.Error(), errClosing) { t.Fatalf("expected %s error, got %s", errClosing, err) } // Verify second close fails. 
if err := cn.Close(); err == nil { t.Fatal("expected error") } else if !strings.Contains(err.Error(), errClosing) { t.Fatalf("expected %s error, got %s", errClosing, err) } } func TestErrorOnExec(t *testing.T) { db := openTestConn(t) defer db.Close() txn, err := db.Begin() if err != nil { t.Fatal(err) } defer txn.Rollback() _, err = txn.Exec("CREATE TEMPORARY TABLE foo(f1 int PRIMARY KEY)") if err != nil { t.Fatal(err) } _, err = txn.Exec("INSERT INTO foo VALUES (0), (0)") if err == nil { t.Fatal("Should have raised error") } e, ok := err.(*Error) if !ok { t.Fatalf("expected Error, got %#v", err) } else if e.Code.Name() != "unique_violation" { t.Fatalf("expected unique_violation, got %s (%+v)", e.Code.Name(), err) } } func TestErrorOnQuery(t *testing.T) { db := openTestConn(t) defer db.Close() txn, err := db.Begin() if err != nil { t.Fatal(err) } defer txn.Rollback() _, err = txn.Exec("CREATE TEMPORARY TABLE foo(f1 int PRIMARY KEY)") if err != nil { t.Fatal(err) } _, err = txn.Query("INSERT INTO foo VALUES (0), (0)") if err == nil { t.Fatal("Should have raised error") } e, ok := err.(*Error) if !ok { t.Fatalf("expected Error, got %#v", err) } else if e.Code.Name() != "unique_violation" { t.Fatalf("expected unique_violation, got %s (%+v)", e.Code.Name(), err) } } func TestErrorOnQueryRowSimpleQuery(t *testing.T) { db := openTestConn(t) defer db.Close() txn, err := db.Begin() if err != nil { t.Fatal(err) } defer txn.Rollback() _, err = txn.Exec("CREATE TEMPORARY TABLE foo(f1 int PRIMARY KEY)") if err != nil { t.Fatal(err) } var v int err = txn.QueryRow("INSERT INTO foo VALUES (0), (0)").Scan(&v) if err == nil { t.Fatal("Should have raised error") } e, ok := err.(*Error) if !ok { t.Fatalf("expected Error, got %#v", err) } else if e.Code.Name() != "unique_violation" { t.Fatalf("expected unique_violation, got %s (%+v)", e.Code.Name(), err) } } // Test the QueryRow bug workarounds in stmt.exec() and simpleQuery() func TestQueryRowBugWorkaround(t *testing.T) { db := openTestConn(t) defer db.Close() // stmt.exec() _, err := db.Exec("CREATE TEMP TABLE notnulltemp (a varchar(10) not null)") if err != nil { t.Fatal(err) } var a string err = db.QueryRow("INSERT INTO notnulltemp(a) values($1) RETURNING a", nil).Scan(&a) if err == sql.ErrNoRows { t.Fatalf("expected constraint violation error; got: %v", err) } pge, ok := err.(*Error) if !ok { t.Fatalf("expected *Error; got: %#v", err) } if pge.Code.Name() != "not_null_violation" { t.Fatalf("expected not_null_violation; got: %s (%+v)", pge.Code.Name(), err) } // Test workaround in simpleQuery() tx, err := db.Begin() if err != nil { t.Fatalf("unexpected error %s in Begin", err) } defer tx.Rollback() _, err = tx.Exec("SET LOCAL check_function_bodies TO FALSE") if err != nil { t.Fatalf("could not disable check_function_bodies: %s", err) } _, err = tx.Exec(` CREATE OR REPLACE FUNCTION bad_function() RETURNS integer -- hack to prevent the function from being inlined SET check_function_bodies TO TRUE AS $$ SELECT text 'bad' $$ LANGUAGE sql`) if err != nil { t.Fatalf("could not create function: %s", err) } err = tx.QueryRow("SELECT * FROM bad_function()").Scan(&a) if err == nil { t.Fatalf("expected error") } pge, ok = err.(*Error) if !ok { t.Fatalf("expected *Error; got: %#v", err) } if pge.Code.Name() != "invalid_function_definition" { t.Fatalf("expected invalid_function_definition; got: %s (%+v)", pge.Code.Name(), err) } err = tx.Rollback() if err != nil { t.Fatalf("unexpected error %s in Rollback", err) } // Also test that simpleQuery()'s workaround works 
when the query fails // after a row has been received. rows, err := db.Query(` select (select generate_series(1, ss.i)) from (select gs.i from generate_series(1, 2) gs(i) order by gs.i limit 2) ss`) if err != nil { t.Fatalf("query failed: %s", err) } if !rows.Next() { t.Fatalf("expected at least one result row; got %s", rows.Err()) } var i int err = rows.Scan(&i) if err != nil { t.Fatalf("rows.Scan() failed: %s", err) } if i != 1 { t.Fatalf("unexpected value for i: %d", i) } if rows.Next() { t.Fatalf("unexpected row") } pge, ok = rows.Err().(*Error) if !ok { t.Fatalf("expected *Error; got: %#v", err) } if pge.Code.Name() != "cardinality_violation" { t.Fatalf("expected cardinality_violation; got: %s (%+v)", pge.Code.Name(), rows.Err()) } } func TestSimpleQuery(t *testing.T) { db := openTestConn(t) defer db.Close() r, err := db.Query("select 1") if err != nil { t.Fatal(err) } defer r.Close() if !r.Next() { t.Fatal("expected row") } } func TestBindError(t *testing.T) { db := openTestConn(t) defer db.Close() _, err := db.Exec("create temp table test (i integer)") if err != nil { t.Fatal(err) } _, err = db.Query("select * from test where i=$1", "hhh") if err == nil { t.Fatal("expected an error") } // Should not get error here r, err := db.Query("select * from test where i=$1", 1) if err != nil { t.Fatal(err) } defer r.Close() } func TestParseErrorInExtendedQuery(t *testing.T) { db := openTestConn(t) defer db.Close() _, err := db.Query("PARSE_ERROR $1", 1) pqErr, _ := err.(*Error) // Expecting a syntax error. if err == nil || pqErr == nil || pqErr.Code != "42601" { t.Fatalf("expected syntax error, got %s", err) } rows, err := db.Query("SELECT 1") if err != nil { t.Fatal(err) } rows.Close() } // TestReturning tests that an INSERT query using the RETURNING clause returns a row. 
func TestReturning(t *testing.T) { db := openTestConn(t) defer db.Close() _, err := db.Exec("CREATE TEMP TABLE distributors (did integer default 0, dname text)") if err != nil { t.Fatal(err) } rows, err := db.Query("INSERT INTO distributors (did, dname) VALUES (DEFAULT, 'XYZ Widgets') " + "RETURNING did;") if err != nil { t.Fatal(err) } if !rows.Next() { t.Fatal("no rows") } var did int err = rows.Scan(&did) if err != nil { t.Fatal(err) } if did != 0 { t.Fatalf("bad value for did: got %d, want %d", did, 0) } if rows.Next() { t.Fatal("unexpected next row") } err = rows.Err() if err != nil { t.Fatal(err) } } func TestIssue186(t *testing.T) { db := openTestConn(t) defer db.Close() // Exec() a query which returns results _, err := db.Exec("VALUES (1), (2), (3)") if err != nil { t.Fatal(err) } _, err = db.Exec("VALUES ($1), ($2), ($3)", 1, 2, 3) if err != nil { t.Fatal(err) } // Query() a query which doesn't return any results txn, err := db.Begin() if err != nil { t.Fatal(err) } defer txn.Rollback() rows, err := txn.Query("CREATE TEMP TABLE foo(f1 int)") if err != nil { t.Fatal(err) } if err = rows.Close(); err != nil { t.Fatal(err) } // small trick to get NoData from a parameterized query _, err = txn.Exec("CREATE RULE nodata AS ON INSERT TO foo DO INSTEAD NOTHING") if err != nil { t.Fatal(err) } rows, err = txn.Query("INSERT INTO foo VALUES ($1)", 1) if err != nil { t.Fatal(err) } if err = rows.Close(); err != nil { t.Fatal(err) } } func TestIssue196(t *testing.T) { db := openTestConn(t) defer db.Close() row := db.QueryRow("SELECT float4 '0.10000122' = $1, float8 '35.03554004971999' = $2", float32(0.10000122), float64(35.03554004971999)) var float4match, float8match bool err := row.Scan(&float4match, &float8match) if err != nil { t.Fatal(err) } if !float4match { t.Errorf("Expected float4 fidelity to be maintained; got no match") } if !float8match { t.Errorf("Expected float8 fidelity to be maintained; got no match") } } // Test that any CommandComplete messages sent before the query results are // ignored. 
func TestIssue282(t *testing.T) { db := openTestConn(t) defer db.Close() var searchPath string err := db.QueryRow(` SET LOCAL search_path TO pg_catalog; SET LOCAL search_path TO pg_catalog; SHOW search_path`).Scan(&searchPath) if err != nil { t.Fatal(err) } if searchPath != "pg_catalog" { t.Fatalf("unexpected search_path %s", searchPath) } } func TestReadFloatPrecision(t *testing.T) { db := openTestConn(t) defer db.Close() row := db.QueryRow("SELECT float4 '0.10000122', float8 '35.03554004971999', float4 '1.2'") var float4val float32 var float8val float64 var float4val2 float64 err := row.Scan(&float4val, &float8val, &float4val2) if err != nil { t.Fatal(err) } if float4val != float32(0.10000122) { t.Errorf("Expected float4 fidelity to be maintained; got no match") } if float8val != float64(35.03554004971999) { t.Errorf("Expected float8 fidelity to be maintained; got no match") } if float4val2 != float64(1.2) { t.Errorf("Expected float4 fidelity into a float64 to be maintained; got no match") } } func TestXactMultiStmt(t *testing.T) { // minified test case based on bug reports from // pico303@gmail.com and rangelspam@gmail.com t.Skip("Skipping failing test") db := openTestConn(t) defer db.Close() tx, err := db.Begin() if err != nil { t.Fatal(err) } defer tx.Commit() rows, err := tx.Query("select 1") if err != nil { t.Fatal(err) } if rows.Next() { var val int32 if err = rows.Scan(&val); err != nil { t.Fatal(err) } } else { t.Fatal("Expected at least one row in first query in xact") } rows2, err := tx.Query("select 2") if err != nil { t.Fatal(err) } if rows2.Next() { var val2 int32 if err := rows2.Scan(&val2); err != nil { t.Fatal(err) } } else { t.Fatal("Expected at least one row in second query in xact") } if err = rows.Err(); err != nil { t.Fatal(err) } if err = rows2.Err(); err != nil { t.Fatal(err) } if err = tx.Commit(); err != nil { t.Fatal(err) } } var envParseTests = []struct { Expected map[string]string Env []string }{ { Env: []string{"PGDATABASE=hello", "PGUSER=goodbye"}, Expected: map[string]string{"dbname": "hello", "user": "goodbye"}, }, { Env: []string{"PGDATESTYLE=ISO, MDY"}, Expected: map[string]string{"datestyle": "ISO, MDY"}, }, { Env: []string{"PGCONNECT_TIMEOUT=30"}, Expected: map[string]string{"connect_timeout": "30"}, }, } func TestParseEnviron(t *testing.T) { for i, tt := range envParseTests { results := parseEnviron(tt.Env) if !reflect.DeepEqual(tt.Expected, results) { t.Errorf("%d: Expected: %#v Got: %#v", i, tt.Expected, results) } } } func TestParseComplete(t *testing.T) { tpc := func(commandTag string, command string, affectedRows int64, shouldFail bool) { defer func() { if p := recover(); p != nil { if !shouldFail { t.Error(p) } } }() cn := &conn{} res, c := cn.parseComplete(commandTag) if c != command { t.Errorf("Expected %v, got %v", command, c) } n, err := res.RowsAffected() if err != nil { t.Fatal(err) } if n != affectedRows { t.Errorf("Expected %d, got %d", affectedRows, n) } } tpc("ALTER TABLE", "ALTER TABLE", 0, false) tpc("INSERT 0 1", "INSERT", 1, false) tpc("UPDATE 100", "UPDATE", 100, false) tpc("SELECT 100", "SELECT", 100, false) tpc("FETCH 100", "FETCH", 100, false) // allow COPY (and others) without row count tpc("COPY", "COPY", 0, false) // don't fail on command tags we don't recognize tpc("UNKNOWNCOMMANDTAG", "UNKNOWNCOMMANDTAG", 0, false) // failure cases tpc("INSERT 1", "", 0, true) // missing oid tpc("UPDATE 0 1", "", 0, true) // too many numbers tpc("SELECT foo", "", 0, true) // invalid row count } // Test interface conformance. 
var ( _ driver.ExecerContext = (*conn)(nil) _ driver.QueryerContext = (*conn)(nil) ) func TestNullAfterNonNull(t *testing.T) { db := openTestConn(t) defer db.Close() r, err := db.Query("SELECT 9::integer UNION SELECT NULL::integer") if err != nil { t.Fatal(err) } var n sql.NullInt64 if !r.Next() { if r.Err() != nil { t.Fatal(err) } t.Fatal("expected row") } if err := r.Scan(&n); err != nil { t.Fatal(err) } if n.Int64 != 9 { t.Fatalf("expected 2, not %d", n.Int64) } if !r.Next() { if r.Err() != nil { t.Fatal(err) } t.Fatal("expected row") } if err := r.Scan(&n); err != nil { t.Fatal(err) } if n.Valid { t.Fatal("expected n to be invalid") } if n.Int64 != 0 { t.Fatalf("expected n to 2, not %d", n.Int64) } } func Test64BitErrorChecking(t *testing.T) { defer func() { if err := recover(); err != nil { t.Fatal("panic due to 0xFFFFFFFF != -1 " + "when int is 64 bits") } }() db := openTestConn(t) defer db.Close() r, err := db.Query(`SELECT * FROM (VALUES (0::integer, NULL::text), (1, 'test string')) AS t;`) if err != nil { t.Fatal(err) } defer r.Close() for r.Next() { } } func TestCommit(t *testing.T) { db := openTestConn(t) defer db.Close() _, err := db.Exec("CREATE TEMP TABLE temp (a int)") if err != nil { t.Fatal(err) } sqlInsert := "INSERT INTO temp VALUES (1)" sqlSelect := "SELECT * FROM temp" tx, err := db.Begin() if err != nil { t.Fatal(err) } _, err = tx.Exec(sqlInsert) if err != nil { t.Fatal(err) } err = tx.Commit() if err != nil { t.Fatal(err) } var i int err = db.QueryRow(sqlSelect).Scan(&i) if err != nil { t.Fatal(err) } if i != 1 { t.Fatalf("expected 1, got %d", i) } } func TestErrorClass(t *testing.T) { db := openTestConn(t) defer db.Close() _, err := db.Query("SELECT int 'notint'") if err == nil { t.Fatal("expected error") } pge, ok := err.(*Error) if !ok { t.Fatalf("expected *pq.Error, got %#+v", err) } if pge.Code.Class() != "22" { t.Fatalf("expected class 28, got %v", pge.Code.Class()) } if pge.Code.Class().Name() != "data_exception" { t.Fatalf("expected data_exception, got %v", pge.Code.Class().Name()) } } func TestParseOpts(t *testing.T) { tests := []struct { in string expected values valid bool }{ {"dbname=hello user=goodbye", values{"dbname": "hello", "user": "goodbye"}, true}, {"dbname=hello user=goodbye ", values{"dbname": "hello", "user": "goodbye"}, true}, {"dbname = hello user=goodbye", values{"dbname": "hello", "user": "goodbye"}, true}, {"dbname=hello user =goodbye", values{"dbname": "hello", "user": "goodbye"}, true}, {"dbname=hello user= goodbye", values{"dbname": "hello", "user": "goodbye"}, true}, {"host=localhost password='correct horse battery staple'", values{"host": "localhost", "password": "correct horse battery staple"}, true}, {"dbname=データベース password=パスワード", values{"dbname": "データベース", "password": "パスワード"}, true}, {"dbname=hello user=''", values{"dbname": "hello", "user": ""}, true}, {"user='' dbname=hello", values{"dbname": "hello", "user": ""}, true}, // The last option value is an empty string if there's no non-whitespace after its = {"dbname=hello user= ", values{"dbname": "hello", "user": ""}, true}, // The parser ignores spaces after = and interprets the next set of non-whitespace characters as the value. 
{"user= password=foo", values{"user": "password=foo"}, true}, // Backslash escapes next char {`user=a\ \'\\b`, values{"user": `a '\b`}, true}, {`user='a \'b'`, values{"user": `a 'b`}, true}, // Incomplete escape {`user=x\`, values{}, false}, // No '=' after the key {"postgre://marko@internet", values{}, false}, {"dbname user=goodbye", values{}, false}, {"user=foo blah", values{}, false}, {"user=foo blah ", values{}, false}, // Unterminated quoted value {"dbname=hello user='unterminated", values{}, false}, } for _, test := range tests { o := make(values) err := parseOpts(test.in, o) switch { case err != nil && test.valid: t.Errorf("%q got unexpected error: %s", test.in, err) case err == nil && test.valid && !reflect.DeepEqual(test.expected, o): t.Errorf("%q got: %#v want: %#v", test.in, o, test.expected) case err == nil && !test.valid: t.Errorf("%q expected an error", test.in) } } } func TestRuntimeParameters(t *testing.T) { tests := []struct { conninfo string param string expected string success bool }{ // invalid parameter {"DOESNOTEXIST=foo", "", "", false}, // we can only work with a specific value for these two {"client_encoding=SQL_ASCII", "", "", false}, {"datestyle='ISO, YDM'", "", "", false}, // "options" should work exactly as it does in libpq {"options='-c search_path=pqgotest'", "search_path", "pqgotest", true}, // pq should override client_encoding in this case {"options='-c client_encoding=SQL_ASCII'", "client_encoding", "UTF8", true}, // allow client_encoding to be set explicitly {"client_encoding=UTF8", "client_encoding", "UTF8", true}, // test a runtime parameter not supported by libpq {"work_mem='139kB'", "work_mem", "139kB", true}, // test fallback_application_name {"application_name=foo fallback_application_name=bar", "application_name", "foo", true}, {"application_name='' fallback_application_name=bar", "application_name", "", true}, {"fallback_application_name=bar", "application_name", "bar", true}, } for _, test := range tests { db, err := openTestConnConninfo(test.conninfo) if err != nil { t.Fatal(err) } // application_name didn't exist before 9.0 if test.param == "application_name" && getServerVersion(t, db) < 90000 { db.Close() continue } tryGetParameterValue := func() (value string, success bool) { defer db.Close() row := db.QueryRow("SELECT current_setting($1)", test.param) err = row.Scan(&value) if err != nil { return "", false } return value, true } value, success := tryGetParameterValue() if success != test.success && !test.success { t.Fatalf("%v: unexpected error: %v", test.conninfo, err) } if success != test.success { t.Fatalf("unexpected outcome %v (was expecting %v) for conninfo \"%s\"", success, test.success, test.conninfo) } if value != test.expected { t.Fatalf("bad value for %s: got %s, want %s with conninfo \"%s\"", test.param, value, test.expected, test.conninfo) } } } func TestIsUTF8(t *testing.T) { var cases = []struct { name string want bool }{ {"unicode", true}, {"utf-8", true}, {"utf_8", true}, {"UTF-8", true}, {"UTF8", true}, {"utf8", true}, {"u n ic_ode", true}, {"ut_f%8", true}, {"ubf8", false}, {"punycode", false}, } for _, test := range cases { if g := isUTF8(test.name); g != test.want { t.Errorf("isUTF8(%q) = %v want %v", test.name, g, test.want) } } } func TestQuoteIdentifier(t *testing.T) { var cases = []struct { input string want string }{ {`foo`, `"foo"`}, {`foo bar baz`, `"foo bar baz"`}, {`foo"bar`, `"foo""bar"`}, {"foo\x00bar", `"foo"`}, {"\x00foo", `""`}, } for _, test := range cases { got := QuoteIdentifier(test.input) if got != 
test.want { t.Errorf("QuoteIdentifier(%q) = %v want %v", test.input, got, test.want) } } } func TestQuoteLiteral(t *testing.T) { var cases = []struct { input string want string }{ {`foo`, `'foo'`}, {`foo bar baz`, `'foo bar baz'`}, {`foo'bar`, `'foo''bar'`}, {`foo\bar`, ` E'foo\\bar'`}, {`foo\ba'r`, ` E'foo\\ba''r'`}, {`foo"bar`, `'foo"bar'`}, {`foo\x00bar`, ` E'foo\\x00bar'`}, {`\x00foo`, ` E'\\x00foo'`}, {`'`, `''''`}, {`''`, `''''''`}, {`\`, ` E'\\'`}, {`'abc'; DROP TABLE users;`, `'''abc''; DROP TABLE users;'`}, {`\'`, ` E'\\'''`}, {`E'\''`, ` E'E''\\'''''`}, {`e'\''`, ` E'e''\\'''''`}, {`E'\'abc\'; DROP TABLE users;'`, ` E'E''\\''abc\\''; DROP TABLE users;'''`}, {`e'\'abc\'; DROP TABLE users;'`, ` E'e''\\''abc\\''; DROP TABLE users;'''`}, } for _, test := range cases { got := QuoteLiteral(test.input) if got != test.want { t.Errorf("QuoteLiteral(%q) = %v want %v", test.input, got, test.want) } } } func TestRowsResultTag(t *testing.T) { type ResultTag interface { Result() driver.Result Tag() string } tests := []struct { query string tag string ra int64 }{ { query: "CREATE TEMP TABLE temp (a int)", tag: "CREATE TABLE", }, { query: "INSERT INTO temp VALUES (1), (2)", tag: "INSERT", ra: 2, }, { query: "SELECT 1", }, // A SELECT anywhere should take precedent. { query: "SELECT 1; INSERT INTO temp VALUES (1), (2)", }, { query: "INSERT INTO temp VALUES (1), (2); SELECT 1", }, // Multiple statements that don't return rows should return the last tag. { query: "CREATE TEMP TABLE t (a int); DROP TABLE t", tag: "DROP TABLE", }, // Ensure a rows-returning query in any position among various tags-returing // statements will prefer the rows. { query: "SELECT 1; CREATE TEMP TABLE t (a int); DROP TABLE t", }, { query: "CREATE TEMP TABLE t (a int); SELECT 1; DROP TABLE t", }, { query: "CREATE TEMP TABLE t (a int); DROP TABLE t; SELECT 1", }, } // If this is the only test run, this will correct the connection string. openTestConn(t).Close() conn, err := Open("") if err != nil { t.Fatal(err) } defer conn.Close() q := conn.(driver.QueryerContext) for _, test := range tests { if rows, err := q.QueryContext(context.Background(), test.query, nil); err != nil { t.Fatalf("%s: %s", test.query, err) } else { r := rows.(ResultTag) if tag := r.Tag(); tag != test.tag { t.Fatalf("%s: unexpected tag %q", test.query, tag) } res := r.Result() if ra, _ := res.RowsAffected(); ra != test.ra { t.Fatalf("%s: unexpected rows affected: %d", test.query, ra) } rows.Close() } } } // TestQuickClose tests that closing a query early allows a subsequent query to work. 
func TestQuickClose(t *testing.T) { db := openTestConn(t) defer db.Close() tx, err := db.Begin() if err != nil { t.Fatal(err) } rows, err := tx.Query("SELECT 1; SELECT 2;") if err != nil { t.Fatal(err) } if err := rows.Close(); err != nil { t.Fatal(err) } var id int if err := tx.QueryRow("SELECT 3").Scan(&id); err != nil { t.Fatal(err) } if id != 3 { t.Fatalf("unexpected %d", id) } if err := tx.Commit(); err != nil { t.Fatal(err) } } func TestMultipleResult(t *testing.T) { db := openTestConn(t) defer db.Close() rows, err := db.Query(` begin; select * from information_schema.tables limit 1; select * from information_schema.columns limit 2; commit; `) if err != nil { t.Fatal(err) } type set struct { cols []string rowCount int } buf := []*set{} for { cols, err := rows.Columns() if err != nil { t.Fatal(err) } s := &set{ cols: cols, } buf = append(buf, s) for rows.Next() { s.rowCount++ } if !rows.NextResultSet() { break } } if len(buf) != 2 { t.Fatalf("got %d sets, expected 2", len(buf)) } if len(buf[0].cols) == len(buf[1].cols) || len(buf[1].cols) == 0 { t.Fatal("invalid cols size, expected different column count and greater then zero") } if buf[0].rowCount != 1 || buf[1].rowCount != 2 { t.Fatal("incorrect number of rows returned") } } func TestMultipleEmptyResult(t *testing.T) { db := openTestConn(t) defer db.Close() rows, err := db.Query("select 1 where false; select 2") if err != nil { t.Fatal(err) } defer rows.Close() for rows.Next() { t.Fatal("unexpected row") } if !rows.NextResultSet() { t.Fatal("expected more result sets", rows.Err()) } for rows.Next() { var i int if err := rows.Scan(&i); err != nil { t.Fatal(err) } if i != 2 { t.Fatalf("expected 2, got %d", i) } } if rows.NextResultSet() { t.Fatal("unexpected result set") } } func TestCopyInStmtAffectedRows(t *testing.T) { db := openTestConn(t) defer db.Close() _, err := db.Exec("CREATE TEMP TABLE temp (a int)") if err != nil { t.Fatal(err) } txn, err := db.BeginTx(context.TODO(), nil) if err != nil { t.Fatal(err) } copyStmt, err := txn.Prepare(CopyIn("temp", "a")) if err != nil { t.Fatal(err) } res, err := copyStmt.Exec() if err != nil { t.Fatal(err) } res.RowsAffected() res.LastInsertId() } func TestConnPrepareContext(t *testing.T) { db := openTestConn(t) defer db.Close() tests := []struct { name string ctx func() (context.Context, context.CancelFunc) sql string err error }{ { name: "context.Background", ctx: func() (context.Context, context.CancelFunc) { return context.Background(), nil }, sql: "SELECT 1", err: nil, }, { name: "context.WithTimeout exceeded", ctx: func() (context.Context, context.CancelFunc) { return context.WithTimeout(context.Background(), -time.Minute) }, sql: "SELECT 1", err: context.DeadlineExceeded, }, { name: "context.WithTimeout", ctx: func() (context.Context, context.CancelFunc) { return context.WithTimeout(context.Background(), time.Minute) }, sql: "SELECT 1", err: nil, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ctx, cancel := tt.ctx() if cancel != nil { defer cancel() } _, err := db.PrepareContext(ctx, tt.sql) switch { case (err != nil) != (tt.err != nil): t.Fatalf("conn.PrepareContext() unexpected nil err got = %v, expected = %v", err, tt.err) case (err != nil && tt.err != nil) && (err.Error() != tt.err.Error()): t.Errorf("conn.PrepareContext() got = %v, expected = %v", err.Error(), tt.err.Error()) } }) } } func TestStmtQueryContext(t *testing.T) { db := openTestConn(t) defer db.Close() tests := []struct { name string ctx func() (context.Context, context.CancelFunc) sql string 
cancelExpected bool }{ { name: "context.Background", ctx: func() (context.Context, context.CancelFunc) { return context.Background(), nil }, sql: "SELECT pg_sleep(1);", cancelExpected: false, }, { name: "context.WithTimeout exceeded", ctx: func() (context.Context, context.CancelFunc) { return context.WithTimeout(context.Background(), 1*time.Second) }, sql: "SELECT pg_sleep(10);", cancelExpected: true, }, { name: "context.WithTimeout", ctx: func() (context.Context, context.CancelFunc) { return context.WithTimeout(context.Background(), time.Minute) }, sql: "SELECT pg_sleep(1);", cancelExpected: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ctx, cancel := tt.ctx() if cancel != nil { defer cancel() } stmt, err := db.PrepareContext(ctx, tt.sql) if err != nil { t.Fatal(err) } _, err = stmt.QueryContext(ctx) pgErr := (*Error)(nil) switch { case (err != nil) != tt.cancelExpected: t.Fatalf("stmt.QueryContext() unexpected nil err got = %v, cancelExpected = %v", err, tt.cancelExpected) case (err != nil && tt.cancelExpected) && !(errors.As(err, &pgErr) && pgErr.Code == cancelErrorCode): t.Errorf("stmt.QueryContext() got = %v, cancelExpected = %v", err.Error(), tt.cancelExpected) } }) } } func TestStmtExecContext(t *testing.T) { db := openTestConn(t) defer db.Close() tests := []struct { name string ctx func() (context.Context, context.CancelFunc) sql string cancelExpected bool }{ { name: "context.Background", ctx: func() (context.Context, context.CancelFunc) { return context.Background(), nil }, sql: "SELECT pg_sleep(1);", cancelExpected: false, }, { name: "context.WithTimeout exceeded", ctx: func() (context.Context, context.CancelFunc) { return context.WithTimeout(context.Background(), 1*time.Second) }, sql: "SELECT pg_sleep(10);", cancelExpected: true, }, { name: "context.WithTimeout", ctx: func() (context.Context, context.CancelFunc) { return context.WithTimeout(context.Background(), time.Minute) }, sql: "SELECT pg_sleep(1);", cancelExpected: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ctx, cancel := tt.ctx() if cancel != nil { defer cancel() } stmt, err := db.PrepareContext(ctx, tt.sql) if err != nil { t.Fatal(err) } _, err = stmt.ExecContext(ctx) pgErr := (*Error)(nil) switch { case (err != nil) != tt.cancelExpected: t.Fatalf("stmt.QueryContext() unexpected nil err got = %v, cancelExpected = %v", err, tt.cancelExpected) case (err != nil && tt.cancelExpected) && !(errors.As(err, &pgErr) && pgErr.Code == cancelErrorCode): t.Errorf("stmt.QueryContext() got = %v, cancelExpected = %v", err.Error(), tt.cancelExpected) } }) } } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/copy_test.go0000644000000000000000000003654515024302467022304 0ustar rootrootpackage pq import ( "bytes" "database/sql" "database/sql/driver" "fmt" "net" "strings" "testing" "time" ) func TestCopyInStmt(t *testing.T) { stmt := CopyIn("table name") if stmt != `COPY "table name" () FROM STDIN` { t.Fatal(stmt) } stmt = CopyIn("table name", "column 1", "column 2") if stmt != `COPY "table name" ("column 1", "column 2") FROM STDIN` { t.Fatal(stmt) } stmt = CopyIn(`table " name """`, `co"lumn""`) if stmt != `COPY "table "" name """"""" ("co""lumn""""") FROM STDIN` { t.Fatal(stmt) } } func TestCopyInSchemaStmt(t *testing.T) { stmt := CopyInSchema("schema name", "table name") if stmt != `COPY "schema name"."table name" () FROM STDIN` { t.Fatal(stmt) } stmt = CopyInSchema("schema name", "table name", "column 1", "column 2") if stmt != `COPY "schema name"."table name" ("column 1", 
"column 2") FROM STDIN` { t.Fatal(stmt) } stmt = CopyInSchema(`schema " name """`, `table " name """`, `co"lumn""`) if stmt != `COPY "schema "" name """"""".`+ `"table "" name """"""" ("co""lumn""""") FROM STDIN` { t.Fatal(stmt) } } func TestCopyInMultipleValues(t *testing.T) { db := openTestConn(t) defer db.Close() txn, err := db.Begin() if err != nil { t.Fatal(err) } defer txn.Rollback() _, err = txn.Exec("CREATE TEMP TABLE temp (a int, b varchar)") if err != nil { t.Fatal(err) } stmt, err := txn.Prepare(CopyIn("temp", "a", "b")) if err != nil { t.Fatal(err) } longString := strings.Repeat("#", 500) for i := 0; i < 500; i++ { _, err = stmt.Exec(int64(i), longString) if err != nil { t.Fatal(err) } } result, err := stmt.Exec() if err != nil { t.Fatal(err) } rowsAffected, err := result.RowsAffected() if err != nil { t.Fatal(err) } if rowsAffected != 500 { t.Fatalf("expected 500 rows affected, not %d", rowsAffected) } err = stmt.Close() if err != nil { t.Fatal(err) } var num int err = txn.QueryRow("SELECT COUNT(*) FROM temp").Scan(&num) if err != nil { t.Fatal(err) } if num != 500 { t.Fatalf("expected 500 items, not %d", num) } } func TestCopyInRaiseStmtTrigger(t *testing.T) { db := openTestConn(t) defer db.Close() if getServerVersion(t, db) < 90000 { var exists int err := db.QueryRow("SELECT 1 FROM pg_language WHERE lanname = 'plpgsql'").Scan(&exists) if err == sql.ErrNoRows { t.Skip("language PL/PgSQL does not exist; skipping TestCopyInRaiseStmtTrigger") } else if err != nil { t.Fatal(err) } } txn, err := db.Begin() if err != nil { t.Fatal(err) } defer txn.Rollback() _, err = txn.Exec("CREATE TEMP TABLE temp (a int, b varchar)") if err != nil { t.Fatal(err) } _, err = txn.Exec(` CREATE OR REPLACE FUNCTION pg_temp.temptest() RETURNS trigger AS $BODY$ begin raise notice 'Hello world'; return new; end $BODY$ LANGUAGE plpgsql`) if err != nil { t.Fatal(err) } _, err = txn.Exec(` CREATE TRIGGER temptest_trigger BEFORE INSERT ON temp FOR EACH ROW EXECUTE PROCEDURE pg_temp.temptest()`) if err != nil { t.Fatal(err) } stmt, err := txn.Prepare(CopyIn("temp", "a", "b")) if err != nil { t.Fatal(err) } longString := strings.Repeat("#", 500) _, err = stmt.Exec(int64(1), longString) if err != nil { t.Fatal(err) } _, err = stmt.Exec() if err != nil { t.Fatal(err) } err = stmt.Close() if err != nil { t.Fatal(err) } var num int err = txn.QueryRow("SELECT COUNT(*) FROM temp").Scan(&num) if err != nil { t.Fatal(err) } if num != 1 { t.Fatalf("expected 1 items, not %d", num) } } func TestCopyInTypes(t *testing.T) { db := openTestConn(t) defer db.Close() txn, err := db.Begin() if err != nil { t.Fatal(err) } defer txn.Rollback() _, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER, text VARCHAR, blob BYTEA, nothing VARCHAR)") if err != nil { t.Fatal(err) } stmt, err := txn.Prepare(CopyIn("temp", "num", "text", "blob", "nothing")) if err != nil { t.Fatal(err) } _, err = stmt.Exec(int64(1234567890), "Héllö\n ☃!\r\t\\", []byte{0, 255, 9, 10, 13}, nil) if err != nil { t.Fatal(err) } _, err = stmt.Exec() if err != nil { t.Fatal(err) } err = stmt.Close() if err != nil { t.Fatal(err) } var num int var text string var blob []byte var nothing sql.NullString err = txn.QueryRow("SELECT * FROM temp").Scan(&num, &text, &blob, ¬hing) if err != nil { t.Fatal(err) } if num != 1234567890 { t.Fatal("unexpected result", num) } if text != "Héllö\n ☃!\r\t\\" { t.Fatal("unexpected result", text) } if !bytes.Equal(blob, []byte{0, 255, 9, 10, 13}) { t.Fatal("unexpected result", blob) } if nothing.Valid { t.Fatal("unexpected result", 
nothing.String) } } func TestCopyInWrongType(t *testing.T) { db := openTestConn(t) defer db.Close() txn, err := db.Begin() if err != nil { t.Fatal(err) } defer txn.Rollback() _, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER)") if err != nil { t.Fatal(err) } stmt, err := txn.Prepare(CopyIn("temp", "num")) if err != nil { t.Fatal(err) } defer stmt.Close() _, err = stmt.Exec("Héllö\n ☃!\r\t\\") if err != nil { t.Fatal(err) } _, err = stmt.Exec() if err == nil { t.Fatal("expected error") } if pge := err.(*Error); pge.Code.Name() != "invalid_text_representation" { t.Fatalf("expected 'invalid input syntax for integer' error, got %s (%+v)", pge.Code.Name(), pge) } } func TestCopyOutsideOfTxnError(t *testing.T) { db := openTestConn(t) defer db.Close() _, err := db.Prepare(CopyIn("temp", "num")) if err == nil { t.Fatal("COPY outside of transaction did not return an error") } if err != errCopyNotSupportedOutsideTxn { t.Fatalf("expected %s, got %s", err, err.Error()) } } func TestCopyInBinaryError(t *testing.T) { db := openTestConn(t) defer db.Close() txn, err := db.Begin() if err != nil { t.Fatal(err) } defer txn.Rollback() _, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER)") if err != nil { t.Fatal(err) } _, err = txn.Prepare("COPY temp (num) FROM STDIN WITH binary") if err != errBinaryCopyNotSupported { t.Fatalf("expected %s, got %+v", errBinaryCopyNotSupported, err) } // check that the protocol is in a valid state err = txn.Rollback() if err != nil { t.Fatal(err) } } func TestCopyFromError(t *testing.T) { db := openTestConn(t) defer db.Close() txn, err := db.Begin() if err != nil { t.Fatal(err) } defer txn.Rollback() _, err = txn.Exec("CREATE TEMP TABLE temp (num INTEGER)") if err != nil { t.Fatal(err) } _, err = txn.Prepare("COPY temp (num) TO STDOUT") if err != errCopyToNotSupported { t.Fatalf("expected %s, got %+v", errCopyToNotSupported, err) } // check that the protocol is in a valid state err = txn.Rollback() if err != nil { t.Fatal(err) } } func TestCopySyntaxError(t *testing.T) { db := openTestConn(t) defer db.Close() txn, err := db.Begin() if err != nil { t.Fatal(err) } defer txn.Rollback() _, err = txn.Prepare("COPY ") if err == nil { t.Fatal("expected error") } if pge := err.(*Error); pge.Code.Name() != "syntax_error" { t.Fatalf("expected syntax error, got %s (%+v)", pge.Code.Name(), pge) } // check that the protocol is in a valid state err = txn.Rollback() if err != nil { t.Fatal(err) } } // Tests for connection errors in copyin.resploop() func TestCopyRespLoopConnectionError(t *testing.T) { db := openTestConn(t) defer db.Close() txn, err := db.Begin() if err != nil { t.Fatal(err) } defer txn.Rollback() var pid int err = txn.QueryRow("SELECT pg_backend_pid()").Scan(&pid) if err != nil { t.Fatal(err) } _, err = txn.Exec("CREATE TEMP TABLE temp (a int)") if err != nil { t.Fatal(err) } stmt, err := txn.Prepare(CopyIn("temp", "a")) if err != nil { t.Fatal(err) } defer stmt.Close() _, err = db.Exec("SELECT pg_terminate_backend($1)", pid) if err != nil { t.Fatal(err) } if getServerVersion(t, db) < 90500 { // We have to try and send something over, since postgres before // version 9.5 won't process SIGTERMs while it's waiting for // CopyData/CopyEnd messages; see tcop/postgres.c. 
_, err = stmt.Exec(1) if err != nil { t.Fatal(err) } } retry(t, time.Second*5, func() error { _, err = stmt.Exec() if err == nil { return fmt.Errorf("expected error") } return nil }) switch pge := err.(type) { case *Error: if pge.Code.Name() != "admin_shutdown" { t.Fatalf("expected admin_shutdown, got %s", pge.Code.Name()) } case *net.OpError: // ignore default: if err == driver.ErrBadConn { // likely an EPIPE } else if err == errCopyInClosed { // ignore } else { t.Fatalf("unexpected error, got %+#v", err) } } _ = stmt.Close() } // retry executes f in a backoff loop until it doesn't return an error. If this // doesn't happen within duration, t.Fatal is called with the latest error. func retry(t *testing.T, duration time.Duration, f func() error) { start := time.Now() next := time.Millisecond * 100 for { err := f() if err == nil { return } if time.Since(start) > duration { t.Fatal(err) } time.Sleep(next) next *= 2 } } func BenchmarkCopyIn(b *testing.B) { db := openTestConn(b) defer db.Close() txn, err := db.Begin() if err != nil { b.Fatal(err) } defer txn.Rollback() _, err = txn.Exec("CREATE TEMP TABLE temp (a int, b varchar)") if err != nil { b.Fatal(err) } stmt, err := txn.Prepare(CopyIn("temp", "a", "b")) if err != nil { b.Fatal(err) } for i := 0; i < b.N; i++ { _, err = stmt.Exec(int64(i), "hello world!") if err != nil { b.Fatal(err) } } _, err = stmt.Exec() if err != nil { b.Fatal(err) } err = stmt.Close() if err != nil { b.Fatal(err) } var num int err = txn.QueryRow("SELECT COUNT(*) FROM temp").Scan(&num) if err != nil { b.Fatal(err) } if num != b.N { b.Fatalf("expected %d items, not %d", b.N, num) } } var bigTableColumns = []string{"ABIOGENETICALLY", "ABORIGINALITIES", "ABSORBABILITIES", "ABSORBEFACIENTS", "ABSORPTIOMETERS", "ABSTRACTIONISMS", "ABSTRACTIONISTS", "ACANTHOCEPHALAN", "ACCEPTABILITIES", "ACCEPTINGNESSES", "ACCESSARINESSES", "ACCESSIBILITIES", "ACCESSORINESSES", "ACCIDENTALITIES", "ACCIDENTOLOGIES", "ACCLIMATISATION", "ACCLIMATIZATION", "ACCOMMODATINGLY", "ACCOMMODATIONAL", "ACCOMPLISHMENTS", "ACCOUNTABLENESS", "ACCOUNTANTSHIPS", "ACCULTURATIONAL", "ACETOPHENETIDIN", "ACETYLSALICYLIC", "ACHONDROPLASIAS", "ACHONDROPLASTIC", "ACHROMATICITIES", "ACHROMATISATION", "ACHROMATIZATION", "ACIDIMETRICALLY", "ACKNOWLEDGEABLE", "ACKNOWLEDGEABLY", "ACKNOWLEDGEMENT", "ACKNOWLEDGMENTS", "ACQUIRABILITIES", "ACQUISITIVENESS", "ACRIMONIOUSNESS", "ACROPARESTHESIA", "ACTINOBIOLOGIES", "ACTINOCHEMISTRY", "ACTINOTHERAPIES", "ADAPTABLENESSES", "ADDITIONALITIES", "ADENOCARCINOMAS", "ADENOHYPOPHYSES", "ADENOHYPOPHYSIS", "ADENOIDECTOMIES", "ADIATHERMANCIES", "ADJUSTABILITIES", "ADMINISTRATIONS", "ADMIRABLENESSES", "ADMISSIBILITIES", "ADRENALECTOMIES", "ADSORBABILITIES", "ADVENTUROUSNESS", "ADVERSARINESSES", "ADVISABLENESSES", "AERODYNAMICALLY", "AERODYNAMICISTS", "AEROELASTICIANS", "AEROHYDROPLANES", "AEROLITHOLOGIES", "AEROSOLISATIONS", "AEROSOLIZATIONS", "AFFECTABILITIES", "AFFECTIVENESSES", "AFFORDABILITIES", "AFFRANCHISEMENT", "AFTERSENSATIONS", "AGGLUTINABILITY", "AGGRANDISEMENTS", "AGGRANDIZEMENTS", "AGGREGATENESSES", "AGRANULOCYTOSES", "AGRANULOCYTOSIS", "AGREEABLENESSES", "AGRIBUSINESSMAN", "AGRIBUSINESSMEN", "AGRICULTURALIST", "AIRWORTHINESSES", "ALCOHOLISATIONS", "ALCOHOLIZATIONS", "ALCOHOLOMETRIES", "ALEXIPHARMAKONS", "ALGORITHMICALLY", "ALKALINISATIONS", "ALKALINIZATIONS", "ALLEGORICALNESS", "ALLEGORISATIONS", "ALLEGORIZATIONS", "ALLELOMORPHISMS", "ALLERGENICITIES", "ALLOTETRAPLOIDS", "ALLOTETRAPLOIDY", "ALLOTRIOMORPHIC", "ALLOWABLENESSES", "ALPHABETISATION", "ALPHABETIZATION", 
"ALTERNATIVENESS", "ALTITUDINARIANS", "ALUMINOSILICATE", "ALUMINOTHERMIES", "AMARYLLIDACEOUS", "AMBASSADORSHIPS", "AMBIDEXTERITIES", "AMBIGUOUSNESSES", "AMBISEXUALITIES", "AMBITIOUSNESSES", "AMINOPEPTIDASES", "AMINOPHENAZONES", "AMMONIFICATIONS", "AMORPHOUSNESSES", "AMPHIDIPLOIDIES", "AMPHITHEATRICAL", "ANACOLUTHICALLY", "ANACREONTICALLY", "ANAESTHESIOLOGY", "ANAESTHETICALLY", "ANAGRAMMATISING", "ANAGRAMMATIZING", "ANALOGOUSNESSES", "ANALYZABILITIES", "ANAMORPHOSCOPES", "ANCYLOSTOMIASES", "ANCYLOSTOMIASIS", "ANDROGYNOPHORES", "ANDROMEDOTOXINS", "ANDROMONOECIOUS", "ANDROMONOECISMS", "ANESTHETIZATION", "ANFRACTUOSITIES", "ANGUSTIROSTRATE", "ANIMATRONICALLY", "ANISOTROPICALLY", "ANKYLOSTOMIASES", "ANKYLOSTOMIASIS", "ANNIHILATIONISM", "ANOMALISTICALLY", "ANOMALOUSNESSES", "ANONYMOUSNESSES", "ANSWERABILITIES", "ANTAGONISATIONS", "ANTAGONIZATIONS", "ANTAPHRODISIACS", "ANTEPENULTIMATE", "ANTHROPOBIOLOGY", "ANTHROPOCENTRIC", "ANTHROPOGENESES", "ANTHROPOGENESIS", "ANTHROPOGENETIC", "ANTHROPOLATRIES", "ANTHROPOLOGICAL", "ANTHROPOLOGISTS", "ANTHROPOMETRIES", "ANTHROPOMETRIST", "ANTHROPOMORPHIC", "ANTHROPOPATHIES", "ANTHROPOPATHISM", "ANTHROPOPHAGIES", "ANTHROPOPHAGITE", "ANTHROPOPHAGOUS", "ANTHROPOPHOBIAS", "ANTHROPOPHOBICS", "ANTHROPOPHUISMS", "ANTHROPOPSYCHIC", "ANTHROPOSOPHIES", "ANTHROPOSOPHIST", "ANTIABORTIONIST", "ANTIALCOHOLISMS", "ANTIAPHRODISIAC", "ANTIARRHYTHMICS", "ANTICAPITALISMS", "ANTICAPITALISTS", "ANTICARCINOGENS", "ANTICHOLESTEROL", "ANTICHOLINERGIC", "ANTICHRISTIANLY", "ANTICLERICALISM", "ANTICLIMACTICAL", "ANTICOINCIDENCE", "ANTICOLONIALISM", "ANTICOLONIALIST", "ANTICOMPETITIVE", "ANTICONVULSANTS", "ANTICONVULSIVES", "ANTIDEPRESSANTS", "ANTIDERIVATIVES", "ANTIDEVELOPMENT", "ANTIEDUCATIONAL", "ANTIEGALITARIAN", "ANTIFASHIONABLE", "ANTIFEDERALISTS", "ANTIFERROMAGNET", "ANTIFORECLOSURE", "ANTIHELMINTHICS", "ANTIHISTAMINICS", "ANTILIBERALISMS", "ANTILIBERTARIAN", "ANTILOGARITHMIC", "ANTIMATERIALISM", "ANTIMATERIALIST", "ANTIMETABOLITES", "ANTIMILITARISMS", "ANTIMILITARISTS", "ANTIMONARCHICAL", "ANTIMONARCHISTS", "ANTIMONOPOLISTS", "ANTINATIONALIST", "ANTINUCLEARISTS", "ANTIODONTALGICS", "ANTIPERISTALSES", "ANTIPERISTALSIS", "ANTIPERISTALTIC", "ANTIPERSPIRANTS", "ANTIPHLOGISTICS", "ANTIPORNOGRAPHY", "ANTIPROGRESSIVE", "ANTIQUARIANISMS", "ANTIRADICALISMS", "ANTIRATIONALISM", "ANTIRATIONALIST", "ANTIRATIONALITY", "ANTIREPUBLICANS", "ANTIROMANTICISM", "ANTISEGREGATION", "ANTISENTIMENTAL", "ANTISEPARATISTS", "ANTISEPTICISING", "ANTISEPTICIZING", "ANTISEXUALITIES", "ANTISHOPLIFTING", "ANTISOCIALITIES", "ANTISPECULATION", "ANTISPECULATIVE", "ANTISYPHILITICS", "ANTITHEORETICAL", "ANTITHROMBOTICS", "ANTITRADITIONAL", "ANTITRANSPIRANT", "ANTITRINITARIAN", "ANTITUBERCULOUS", "ANTIVIVISECTION", "APHELIOTROPISMS", "APOCALYPTICALLY", "APOCALYPTICISMS", "APOLIPOPROTEINS", "APOLITICALITIES", "APOPHTHEGMATISE", "APOPHTHEGMATIST", "APOPHTHEGMATIZE", "APOTHEGMATISING", "APOTHEGMATIZING", "APPEALABILITIES", "APPEALINGNESSES", "APPENDICULARIAN", "APPLICABILITIES", "APPRENTICEHOODS", "APPRENTICEMENTS", "APPRENTICESHIPS", "APPROACHABILITY", "APPROPINQUATING", "APPROPINQUATION", "APPROPINQUITIES", "APPROPRIATENESS", "ARACHNOIDITISES", "ARBITRARINESSES", "ARBORICULTURIST", "ARCHAEBACTERIUM", "ARCHAEOBOTANIES", "ARCHAEOBOTANIST", "ARCHAEOMETRISTS", "ARCHAEOPTERYXES", "ARCHAEZOOLOGIES", "ARCHEOASTRONOMY", "ARCHEOBOTANISTS", "ARCHEOLOGICALLY", "ARCHEOMAGNETISM", "ARCHEOZOOLOGIES", "ARCHEOZOOLOGIST", "ARCHGENETHLIACS", "ARCHIDIACONATES", "ARCHIEPISCOPACY", "ARCHIEPISCOPATE", "ARCHITECTURALLY", "ARCHPRIESTHOODS", 
"ARCHPRIESTSHIPS", "ARGUMENTATIVELY", "ARIBOFLAVINOSES", "ARIBOFLAVINOSIS", "AROMATHERAPISTS", "ARRONDISSEMENTS", "ARTERIALISATION", "ARTERIALIZATION", "ARTERIOGRAPHIES", "ARTIFICIALISING", "ARTIFICIALITIES", "ARTIFICIALIZING", "ASCLEPIADACEOUS", "ASSENTIVENESSES"} func BenchmarkCopy(b *testing.B) { for i := 0; i < b.N; i++ { CopyIn("temp", bigTableColumns...) } } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/notify.go0000644000000000000000000006177215024302467021603 0ustar rootrootpackage pq // Package pq is a pure Go Postgres driver for the database/sql package. // This module contains support for Postgres LISTEN/NOTIFY. import ( "context" "database/sql/driver" "errors" "fmt" "sync" "sync/atomic" "time" ) // Notification represents a single notification from the database. type Notification struct { // Process ID (PID) of the notifying postgres backend. BePid int // Name of the channel the notification was sent on. Channel string // Payload, or the empty string if unspecified. Extra string } func recvNotification(r *readBuf) *Notification { bePid := r.int32() channel := r.string() extra := r.string() return &Notification{bePid, channel, extra} } // SetNotificationHandler sets the given notification handler on the given // connection. A runtime panic occurs if c is not a pq connection. A nil handler // may be used to unset it. // // Note: Notification handlers are executed synchronously by pq meaning commands // won't continue to be processed until the handler returns. func SetNotificationHandler(c driver.Conn, handler func(*Notification)) { c.(*conn).notificationHandler = handler } // NotificationHandlerConnector wraps a regular connector and sets a notification handler // on it. type NotificationHandlerConnector struct { driver.Connector notificationHandler func(*Notification) } // Connect calls the underlying connector's connect method and then sets the // notification handler. func (n *NotificationHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) { c, err := n.Connector.Connect(ctx) if err == nil { SetNotificationHandler(c, n.notificationHandler) } return c, err } // ConnectorNotificationHandler returns the currently set notification handler, if any. If // the given connector is not a result of ConnectorWithNotificationHandler, nil is // returned. func ConnectorNotificationHandler(c driver.Connector) func(*Notification) { if c, ok := c.(*NotificationHandlerConnector); ok { return c.notificationHandler } return nil } // ConnectorWithNotificationHandler creates or sets the given handler for the given // connector. If the given connector is a result of calling this function // previously, it is simply set on the given connector and returned. Otherwise, // this returns a new connector wrapping the given one and setting the notification // handler. A nil notification handler may be used to unset it. // // The returned connector is intended to be used with database/sql.OpenDB. // // Note: Notification handlers are executed synchronously by pq meaning commands // won't continue to be processed until the handler returns. 
func ConnectorWithNotificationHandler(c driver.Connector, handler func(*Notification)) *NotificationHandlerConnector { if c, ok := c.(*NotificationHandlerConnector); ok { c.notificationHandler = handler return c } return &NotificationHandlerConnector{Connector: c, notificationHandler: handler} } const ( connStateIdle int32 = iota connStateExpectResponse connStateExpectReadyForQuery ) type message struct { typ byte err error } var errListenerConnClosed = errors.New("pq: ListenerConn has been closed") // ListenerConn is a low-level interface for waiting for notifications. You // should use Listener instead. type ListenerConn struct { // guards cn and err connectionLock sync.Mutex cn *conn err error connState int32 // the sending goroutine will be holding this lock senderLock sync.Mutex notificationChan chan<- *Notification replyChan chan message } // NewListenerConn creates a new ListenerConn. Use NewListener instead. func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) { return newDialListenerConn(defaultDialer{}, name, notificationChan) } func newDialListenerConn(d Dialer, name string, c chan<- *Notification) (*ListenerConn, error) { cn, err := DialOpen(d, name) if err != nil { return nil, err } l := &ListenerConn{ cn: cn.(*conn), notificationChan: c, connState: connStateIdle, replyChan: make(chan message, 2), } go l.listenerConnMain() return l, nil } // We can only allow one goroutine at a time to be running a query on the // connection for various reasons, so the goroutine sending on the connection // must be holding senderLock. // // Returns an error if an unrecoverable error has occurred and the ListenerConn // should be abandoned. func (l *ListenerConn) acquireSenderLock() error { // we must acquire senderLock first to avoid deadlocks; see ExecSimpleQuery l.senderLock.Lock() l.connectionLock.Lock() err := l.err l.connectionLock.Unlock() if err != nil { l.senderLock.Unlock() return err } return nil } func (l *ListenerConn) releaseSenderLock() { l.senderLock.Unlock() } // setState advances the protocol state to newState. Returns false if moving // to that state from the current state is not allowed. func (l *ListenerConn) setState(newState int32) bool { var expectedState int32 switch newState { case connStateIdle: expectedState = connStateExpectReadyForQuery case connStateExpectResponse: expectedState = connStateIdle case connStateExpectReadyForQuery: expectedState = connStateExpectResponse default: panic(fmt.Sprintf("unexpected listenerConnState %d", newState)) } return atomic.CompareAndSwapInt32(&l.connState, expectedState, newState) } // Main logic is here: receive messages from the postgres backend, forward // notifications and query replies and keep the internal state in sync with the // protocol state. Returns when the connection has been lost, is about to go // away or should be discarded because we couldn't agree on the state with the // server backend. func (l *ListenerConn) listenerConnLoop() (err error) { defer errRecoverNoErrBadConn(&err) r := &readBuf{} for { t, err := l.cn.recvMessage(r) if err != nil { return err } switch t { case 'A': // recvNotification copies all the data so we don't need to worry // about the scratch buffer being overwritten. 
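// Note that this is a plain channel send: once notificationChan's buffer (if
// any) is full it blocks, and with it this connection's receive loop, so
// consumers are expected to drain the channel promptly.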
l.notificationChan <- recvNotification(r) case 'T', 'D': // only used by tests; ignore case 'E': // We might receive an ErrorResponse even when not in a query; it // is expected that the server will close the connection after // that, but we should make sure that the error we display is the // one from the stray ErrorResponse, not io.ErrUnexpectedEOF. if !l.setState(connStateExpectReadyForQuery) { return parseError(r) } l.replyChan <- message{t, parseError(r)} case 'C', 'I': if !l.setState(connStateExpectReadyForQuery) { // protocol out of sync return fmt.Errorf("unexpected CommandComplete") } // ExecSimpleQuery doesn't need to know about this message case 'Z': if !l.setState(connStateIdle) { // protocol out of sync return fmt.Errorf("unexpected ReadyForQuery") } l.replyChan <- message{t, nil} case 'S': // ignore case 'N': if n := l.cn.noticeHandler; n != nil { n(parseError(r)) } default: return fmt.Errorf("unexpected message %q from server in listenerConnLoop", t) } } } // This is the main routine for the goroutine receiving on the database // connection. Most of the main logic is in listenerConnLoop. func (l *ListenerConn) listenerConnMain() { err := l.listenerConnLoop() // listenerConnLoop terminated; we're done, but we still have to clean up. // Make sure nobody tries to start any new queries by making sure the err // pointer is set. It is important that we do not overwrite its value; a // connection could be closed by either this goroutine or one sending on // the connection -- whoever closes the connection is assumed to have the // more meaningful error message (as the other one will probably get // net.errClosed), so that goroutine sets the error we expose while the // other error is discarded. If the connection is lost while two // goroutines are operating on the socket, it probably doesn't matter which // error we expose so we don't try to do anything more complex. l.connectionLock.Lock() if l.err == nil { l.err = err } l.cn.Close() l.connectionLock.Unlock() // There might be a query in-flight; make sure nobody's waiting for a // response to it, since there's not going to be one. close(l.replyChan) // let the listener know we're done close(l.notificationChan) // this ListenerConn is done } // Listen sends a LISTEN query to the server. See ExecSimpleQuery. func (l *ListenerConn) Listen(channel string) (bool, error) { return l.ExecSimpleQuery("LISTEN " + QuoteIdentifier(channel)) } // Unlisten sends an UNLISTEN query to the server. See ExecSimpleQuery. func (l *ListenerConn) Unlisten(channel string) (bool, error) { return l.ExecSimpleQuery("UNLISTEN " + QuoteIdentifier(channel)) } // UnlistenAll sends an `UNLISTEN *` query to the server. See ExecSimpleQuery. func (l *ListenerConn) UnlistenAll() (bool, error) { return l.ExecSimpleQuery("UNLISTEN *") } // Ping the remote server to make sure it's alive. Non-nil error means the // connection has failed and should be abandoned. func (l *ListenerConn) Ping() error { sent, err := l.ExecSimpleQuery("") if !sent { return err } if err != nil { // shouldn't happen panic(err) } return nil } // Attempt to send a query on the connection. Returns an error if sending the // query failed, and the caller should initiate closure of this connection. // The caller must be holding senderLock (see acquireSenderLock and // releaseSenderLock). 
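//
// For illustration only, a hedged sketch of driving ListenerConn directly
// (most applications should prefer Listener; the connection string and
// channel name are placeholders):
//
//	notifications := make(chan *pq.Notification, 32)
//	lc, err := pq.NewListenerConn("postgres://user:pass@localhost/mydb?sslmode=disable", notifications)
//	if err != nil {
//		log.Fatal(err)
//	}
//	if ok, err := lc.Listen("jobs"); !ok || err != nil {
//		log.Fatal(err)
//	}
//	for n := range notifications {
//		log.Printf("got %q on %q", n.Extra, n.Channel)
//	}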
func (l *ListenerConn) sendSimpleQuery(q string) (err error) { defer errRecoverNoErrBadConn(&err) // must set connection state before sending the query if !l.setState(connStateExpectResponse) { panic("two queries running at the same time") } // Can't use l.cn.writeBuf here because it uses the scratch buffer which // might get overwritten by listenerConnLoop. b := &writeBuf{ buf: []byte("Q\x00\x00\x00\x00"), pos: 1, } b.string(q) l.cn.send(b) return nil } // ExecSimpleQuery executes a "simple query" (i.e. one with no bindable // parameters) on the connection. The possible return values are: // 1) "executed" is true; the query was executed to completion on the // database server. If the query failed, err will be set to the error // returned by the database, otherwise err will be nil. // 2) If "executed" is false, the query could not be executed on the remote // server. err will be non-nil. // // After a call to ExecSimpleQuery has returned an executed=false value, the // connection has either been closed or will be closed shortly thereafter, and // all subsequently executed queries will return an error. func (l *ListenerConn) ExecSimpleQuery(q string) (executed bool, err error) { if err = l.acquireSenderLock(); err != nil { return false, err } defer l.releaseSenderLock() err = l.sendSimpleQuery(q) if err != nil { // We can't know what state the protocol is in, so we need to abandon // this connection. l.connectionLock.Lock() // Set the error pointer if it hasn't been set already; see // listenerConnMain. if l.err == nil { l.err = err } l.connectionLock.Unlock() l.cn.c.Close() return false, err } // now we just wait for a reply.. for { m, ok := <-l.replyChan if !ok { // We lost the connection to server, don't bother waiting for a // a response. err should have been set already. l.connectionLock.Lock() err := l.err l.connectionLock.Unlock() return false, err } switch m.typ { case 'Z': // sanity check if m.err != nil { panic("m.err != nil") } // done; err might or might not be set return true, err case 'E': // sanity check if m.err == nil { panic("m.err == nil") } // server responded with an error; ReadyForQuery to follow err = m.err default: return false, fmt.Errorf("unknown response for simple query: %q", m.typ) } } } // Close closes the connection. func (l *ListenerConn) Close() error { l.connectionLock.Lock() if l.err != nil { l.connectionLock.Unlock() return errListenerConnClosed } l.err = errListenerConnClosed l.connectionLock.Unlock() // We can't send anything on the connection without holding senderLock. // Simply close the net.Conn to wake up everyone operating on it. return l.cn.c.Close() } // Err returns the reason the connection was closed. It is not safe to call // this function until l.Notify has been closed. func (l *ListenerConn) Err() error { return l.err } var errListenerClosed = errors.New("pq: Listener has been closed") // ErrChannelAlreadyOpen is returned from Listen when a channel is already // open. var ErrChannelAlreadyOpen = errors.New("pq: channel is already open") // ErrChannelNotOpen is returned from Unlisten when a channel is not open. var ErrChannelNotOpen = errors.New("pq: channel is not open") // ListenerEventType is an enumeration of listener event types. type ListenerEventType int const ( // ListenerEventConnected is emitted only when the database connection // has been initially initialized. The err argument of the callback // will always be nil. 
ListenerEventConnected ListenerEventType = iota // ListenerEventDisconnected is emitted after a database connection has // been lost, either because of an error or because Close has been // called. The err argument will be set to the reason the database // connection was lost. ListenerEventDisconnected // ListenerEventReconnected is emitted after a database connection has // been re-established after connection loss. The err argument of the // callback will always be nil. After this event has been emitted, a // nil pq.Notification is sent on the Listener.Notify channel. ListenerEventReconnected // ListenerEventConnectionAttemptFailed is emitted after a connection // to the database was attempted, but failed. The err argument will be // set to an error describing why the connection attempt did not // succeed. ListenerEventConnectionAttemptFailed ) // EventCallbackType is the event callback type. See also ListenerEventType // constants' documentation. type EventCallbackType func(event ListenerEventType, err error) // Listener provides an interface for listening to notifications from a // PostgreSQL database. For general usage information, see section // "Notifications". // // Listener can safely be used from concurrently running goroutines. type Listener struct { // Channel for receiving notifications from the database. In some cases a // nil value will be sent. See section "Notifications" above. Notify chan *Notification name string minReconnectInterval time.Duration maxReconnectInterval time.Duration dialer Dialer eventCallback EventCallbackType lock sync.Mutex isClosed bool reconnectCond *sync.Cond cn *ListenerConn connNotificationChan <-chan *Notification channels map[string]struct{} } // NewListener creates a new database connection dedicated to LISTEN / NOTIFY. // // name should be set to a connection string to be used to establish the // database connection (see section "Connection String Parameters" above). // // minReconnectInterval controls the duration to wait before trying to // re-establish the database connection after connection loss. After each // consecutive failure this interval is doubled, until maxReconnectInterval is // reached. Successfully completing the connection establishment procedure // resets the interval back to minReconnectInterval. // // The last parameter eventCallback can be set to a function which will be // called by the Listener when the state of the underlying database connection // changes. This callback will be called by the goroutine which dispatches the // notifications over the Notify channel, so you should try to avoid doing // potentially time-consuming operations from the callback. func NewListener(name string, minReconnectInterval time.Duration, maxReconnectInterval time.Duration, eventCallback EventCallbackType) *Listener { return NewDialListener(defaultDialer{}, name, minReconnectInterval, maxReconnectInterval, eventCallback) } // NewDialListener is like NewListener but it takes a Dialer. func NewDialListener(d Dialer, name string, minReconnectInterval time.Duration, maxReconnectInterval time.Duration, eventCallback EventCallbackType) *Listener { l := &Listener{ name: name, minReconnectInterval: minReconnectInterval, maxReconnectInterval: maxReconnectInterval, dialer: d, eventCallback: eventCallback, channels: make(map[string]struct{}), Notify: make(chan *Notification, 32), } l.reconnectCond = sync.NewCond(&l.lock) go l.listenerMain() return l } // NotificationChannel returns the notification channel for this listener. 
// This is the same channel as Notify, and will not be recreated during the // life time of the Listener. func (l *Listener) NotificationChannel() <-chan *Notification { return l.Notify } // Listen starts listening for notifications on a channel. Calls to this // function will block until an acknowledgement has been received from the // server. Note that Listener automatically re-establishes the connection // after connection loss, so this function may block indefinitely if the // connection can not be re-established. // // Listen will only fail in three conditions: // 1) The channel is already open. The returned error will be // ErrChannelAlreadyOpen. // 2) The query was executed on the remote server, but PostgreSQL returned an // error message in response to the query. The returned error will be a // pq.Error containing the information the server supplied. // 3) Close is called on the Listener before the request could be completed. // // The channel name is case-sensitive. func (l *Listener) Listen(channel string) error { l.lock.Lock() defer l.lock.Unlock() if l.isClosed { return errListenerClosed } // The server allows you to issue a LISTEN on a channel which is already // open, but it seems useful to be able to detect this case to spot for // mistakes in application logic. If the application genuinely does't // care, it can check the exported error and ignore it. _, exists := l.channels[channel] if exists { return ErrChannelAlreadyOpen } if l.cn != nil { // If gotResponse is true but error is set, the query was executed on // the remote server, but resulted in an error. This should be // relatively rare, so it's fine if we just pass the error to our // caller. However, if gotResponse is false, we could not complete the // query on the remote server and our underlying connection is about // to go away, so we only add relname to l.channels, and wait for // resync() to take care of the rest. gotResponse, err := l.cn.Listen(channel) if gotResponse && err != nil { return err } } l.channels[channel] = struct{}{} for l.cn == nil { l.reconnectCond.Wait() // we let go of the mutex for a while if l.isClosed { return errListenerClosed } } return nil } // Unlisten removes a channel from the Listener's channel list. Returns // ErrChannelNotOpen if the Listener is not listening on the specified channel. // Returns immediately with no error if there is no connection. Note that you // might still get notifications for this channel even after Unlisten has // returned. // // The channel name is case-sensitive. func (l *Listener) Unlisten(channel string) error { l.lock.Lock() defer l.lock.Unlock() if l.isClosed { return errListenerClosed } // Similarly to LISTEN, this is not an error in Postgres, but it seems // useful to distinguish from the normal conditions. _, exists := l.channels[channel] if !exists { return ErrChannelNotOpen } if l.cn != nil { // Similarly to Listen (see comment in that function), the caller // should only be bothered with an error if it came from the backend as // a response to our query. gotResponse, err := l.cn.Unlisten(channel) if gotResponse && err != nil { return err } } // Don't bother waiting for resync if there's no connection. delete(l.channels, channel) return nil } // UnlistenAll removes all channels from the Listener's channel list. Returns // immediately with no error if there is no connection. Note that you might // still get notifications for any of the deleted channels even after // UnlistenAll has returned. 
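//
// A brief, hedged sketch of the Listen/Unlisten error contract described
// above (the channel name is a placeholder):
//
//	if err := listener.Listen("jobs"); err != nil && err != pq.ErrChannelAlreadyOpen {
//		log.Fatal(err)
//	}
//	// ... later ...
//	if err := listener.Unlisten("jobs"); err != nil && err != pq.ErrChannelNotOpen {
//		log.Fatal(err)
//	}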
func (l *Listener) UnlistenAll() error { l.lock.Lock() defer l.lock.Unlock() if l.isClosed { return errListenerClosed } if l.cn != nil { // Similarly to Listen (see comment in that function), the caller // should only be bothered with an error if it came from the backend as // a response to our query. gotResponse, err := l.cn.UnlistenAll() if gotResponse && err != nil { return err } } // Don't bother waiting for resync if there's no connection. l.channels = make(map[string]struct{}) return nil } // Ping the remote server to make sure it's alive. Non-nil return value means // that there is no active connection. func (l *Listener) Ping() error { l.lock.Lock() defer l.lock.Unlock() if l.isClosed { return errListenerClosed } if l.cn == nil { return errors.New("no connection") } return l.cn.Ping() } // Clean up after losing the server connection. Returns l.cn.Err(), which // should have the reason the connection was lost. func (l *Listener) disconnectCleanup() error { l.lock.Lock() defer l.lock.Unlock() // sanity check; can't look at Err() until the channel has been closed select { case _, ok := <-l.connNotificationChan: if ok { panic("connNotificationChan not closed") } default: panic("connNotificationChan not closed") } err := l.cn.Err() l.cn.Close() l.cn = nil return err } // Synchronize the list of channels we want to be listening on with the server // after the connection has been established. func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notification) error { doneChan := make(chan error) go func(notificationChan <-chan *Notification) { for channel := range l.channels { // If we got a response, return that error to our caller as it's // going to be more descriptive than cn.Err(). gotResponse, err := cn.Listen(channel) if gotResponse && err != nil { doneChan <- err return } // If we couldn't reach the server, wait for notificationChan to // close and then return the error message from the connection, as // per ListenerConn's interface. if err != nil { for range notificationChan { } doneChan <- cn.Err() return } } doneChan <- nil }(notificationChan) // Ignore notifications while synchronization is going on to avoid // deadlocks. We have to send a nil notification over Notify anyway as // we can't possibly know which notifications (if any) were lost while // the connection was down, so there's no reason to try and process // these messages at all. for { select { case _, ok := <-notificationChan: if !ok { notificationChan = nil } case err := <-doneChan: return err } } } // caller should NOT be holding l.lock func (l *Listener) closed() bool { l.lock.Lock() defer l.lock.Unlock() return l.isClosed } func (l *Listener) connect() error { notificationChan := make(chan *Notification, 32) cn, err := newDialListenerConn(l.dialer, l.name, notificationChan) if err != nil { return err } l.lock.Lock() defer l.lock.Unlock() err = l.resync(cn, notificationChan) if err != nil { cn.Close() return err } l.cn = cn l.connNotificationChan = notificationChan l.reconnectCond.Broadcast() return nil } // Close disconnects the Listener from the database and shuts it down. // Subsequent calls to its methods will return an error. Close returns an // error if the connection has already been closed. 
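//
// A hedged end-to-end sketch of the Listener lifecycle (connection string,
// channel name and reconnect intervals are placeholders):
//
//	listener := pq.NewListener(
//		"postgres://user:pass@localhost/mydb?sslmode=disable",
//		10*time.Second, time.Minute,
//		func(ev pq.ListenerEventType, err error) {
//			if err != nil {
//				log.Printf("listener event %v: %v", ev, err)
//			}
//		})
//	defer listener.Close()
//	if err := listener.Listen("jobs"); err != nil {
//		log.Fatal(err)
//	}
//	for n := range listener.Notify {
//		if n == nil {
//			// A nil notification is sent after a reconnect; notifications
//			// may have been missed while the connection was down.
//			continue
//		}
//		log.Printf("payload on %q: %s", n.Channel, n.Extra)
//	}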
func (l *Listener) Close() error { l.lock.Lock() defer l.lock.Unlock() if l.isClosed { return errListenerClosed } if l.cn != nil { l.cn.Close() } l.isClosed = true // Unblock calls to Listen() l.reconnectCond.Broadcast() return nil } func (l *Listener) emitEvent(event ListenerEventType, err error) { if l.eventCallback != nil { l.eventCallback(event, err) } } // Main logic here: maintain a connection to the server when possible, wait // for notifications and emit events. func (l *Listener) listenerConnLoop() { var nextReconnect time.Time reconnectInterval := l.minReconnectInterval for { for { err := l.connect() if err == nil { break } if l.closed() { return } l.emitEvent(ListenerEventConnectionAttemptFailed, err) time.Sleep(reconnectInterval) reconnectInterval *= 2 if reconnectInterval > l.maxReconnectInterval { reconnectInterval = l.maxReconnectInterval } } if nextReconnect.IsZero() { l.emitEvent(ListenerEventConnected, nil) } else { l.emitEvent(ListenerEventReconnected, nil) l.Notify <- nil } reconnectInterval = l.minReconnectInterval nextReconnect = time.Now().Add(reconnectInterval) for { notification, ok := <-l.connNotificationChan if !ok { // lost connection, loop again break } l.Notify <- notification } err := l.disconnectCleanup() if l.closed() { return } l.emitEvent(ListenerEventDisconnected, err) time.Sleep(time.Until(nextReconnect)) } } func (l *Listener) listenerMain() { l.listenerConnLoop() close(l.Notify) } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/encode.go0000644000000000000000000004056415024302467021524 0ustar rootrootpackage pq import ( "bytes" "database/sql/driver" "encoding/binary" "encoding/hex" "errors" "fmt" "math" "regexp" "strconv" "strings" "sync" "time" "github.com/lib/pq/oid" ) var time2400Regex = regexp.MustCompile(`^(24:00(?::00(?:\.0+)?)?)(?:[Z+-].*)?$`) func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte { switch v := x.(type) { case []byte: return v default: return encode(parameterStatus, x, oid.T_unknown) } } func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) []byte { switch v := x.(type) { case int64: return strconv.AppendInt(nil, v, 10) case float64: return strconv.AppendFloat(nil, v, 'f', -1, 64) case []byte: if pgtypOid == oid.T_bytea { return encodeBytea(parameterStatus.serverVersion, v) } return v case string: if pgtypOid == oid.T_bytea { return encodeBytea(parameterStatus.serverVersion, []byte(v)) } return []byte(v) case bool: return strconv.AppendBool(nil, v) case time.Time: return formatTs(v) default: errorf("encode: unknown type for %T", v) } panic("not reached") } func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid, f format) interface{} { switch f { case formatBinary: return binaryDecode(parameterStatus, s, typ) case formatText: return textDecode(parameterStatus, s, typ) default: panic("not reached") } } func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { switch typ { case oid.T_bytea: return s case oid.T_int8: return int64(binary.BigEndian.Uint64(s)) case oid.T_int4: return int64(int32(binary.BigEndian.Uint32(s))) case oid.T_int2: return int64(int16(binary.BigEndian.Uint16(s))) case oid.T_uuid: b, err := decodeUUIDBinary(s) if err != nil { panic(err) } return b default: errorf("don't know how to decode binary parameter of type %d", uint32(typ)) } panic("not reached") } func textDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { switch typ { case oid.T_char, oid.T_varchar, oid.T_text: return string(s) case 
oid.T_bytea: b, err := parseBytea(s) if err != nil { errorf("%s", err) } return b case oid.T_timestamptz: return parseTs(parameterStatus.currentLocation, string(s)) case oid.T_timestamp, oid.T_date: return parseTs(nil, string(s)) case oid.T_time: return mustParse("15:04:05", typ, s) case oid.T_timetz: return mustParse("15:04:05-07", typ, s) case oid.T_bool: return s[0] == 't' case oid.T_int8, oid.T_int4, oid.T_int2: i, err := strconv.ParseInt(string(s), 10, 64) if err != nil { errorf("%s", err) } return i case oid.T_float4, oid.T_float8: // We always use 64 bit parsing, regardless of whether the input text is for // a float4 or float8, because clients expect float64s for all float datatypes // and returning a 32-bit parsed float64 produces lossy results. f, err := strconv.ParseFloat(string(s), 64) if err != nil { errorf("%s", err) } return f } return s } // appendEncodedText encodes item in text format as required by COPY // and appends to buf func appendEncodedText(parameterStatus *parameterStatus, buf []byte, x interface{}) []byte { switch v := x.(type) { case int64: return strconv.AppendInt(buf, v, 10) case float64: return strconv.AppendFloat(buf, v, 'f', -1, 64) case []byte: encodedBytea := encodeBytea(parameterStatus.serverVersion, v) return appendEscapedText(buf, string(encodedBytea)) case string: return appendEscapedText(buf, v) case bool: return strconv.AppendBool(buf, v) case time.Time: return append(buf, formatTs(v)...) case nil: return append(buf, "\\N"...) default: errorf("encode: unknown type for %T", v) } panic("not reached") } func appendEscapedText(buf []byte, text string) []byte { escapeNeeded := false startPos := 0 var c byte // check if we need to escape for i := 0; i < len(text); i++ { c = text[i] if c == '\\' || c == '\n' || c == '\r' || c == '\t' { escapeNeeded = true startPos = i break } } if !escapeNeeded { return append(buf, text...) } // copy till first char to escape, iterate the rest result := append(buf, text[:startPos]...) for i := startPos; i < len(text); i++ { c = text[i] switch c { case '\\': result = append(result, '\\', '\\') case '\n': result = append(result, '\\', 'n') case '\r': result = append(result, '\\', 'r') case '\t': result = append(result, '\\', 't') default: result = append(result, c) } } return result } func mustParse(f string, typ oid.Oid, s []byte) time.Time { str := string(s) // Check for a minute and second offset in the timezone. if typ == oid.T_timestamptz || typ == oid.T_timetz { for i := 3; i <= 6; i += 3 { if str[len(str)-i] == ':' { f += ":00" continue } break } } // Special case for 24:00 time. // Unfortunately, golang does not parse 24:00 as a proper time. // In this case, we want to try "round to the next day", to differentiate. // As such, we find if the 24:00 time matches at the beginning; if so, // we default it back to 00:00 but add a day later. var is2400Time bool switch typ { case oid.T_timetz, oid.T_time: if matches := time2400Regex.FindStringSubmatch(str); matches != nil { // Concatenate timezone information at the back. 
str = "00:00:00" + str[len(matches[1]):] is2400Time = true } } t, err := time.Parse(f, str) if err != nil { errorf("decode: %s", err) } if is2400Time { t = t.Add(24 * time.Hour) } return t } var errInvalidTimestamp = errors.New("invalid timestamp") type timestampParser struct { err error } func (p *timestampParser) expect(str string, char byte, pos int) { if p.err != nil { return } if pos+1 > len(str) { p.err = errInvalidTimestamp return } if c := str[pos]; c != char && p.err == nil { p.err = fmt.Errorf("expected '%v' at position %v; got '%v'", char, pos, c) } } func (p *timestampParser) mustAtoi(str string, begin int, end int) int { if p.err != nil { return 0 } if begin < 0 || end < 0 || begin > end || end > len(str) { p.err = errInvalidTimestamp return 0 } result, err := strconv.Atoi(str[begin:end]) if err != nil { if p.err == nil { p.err = fmt.Errorf("expected number; got '%v'", str) } return 0 } return result } // The location cache caches the time zones typically used by the client. type locationCache struct { cache map[int]*time.Location lock sync.Mutex } // All connections share the same list of timezones. Benchmarking shows that // about 5% speed could be gained by putting the cache in the connection and // losing the mutex, at the cost of a small amount of memory and a somewhat // significant increase in code complexity. var globalLocationCache = newLocationCache() func newLocationCache() *locationCache { return &locationCache{cache: make(map[int]*time.Location)} } // Returns the cached timezone for the specified offset, creating and caching // it if necessary. func (c *locationCache) getLocation(offset int) *time.Location { c.lock.Lock() defer c.lock.Unlock() location, ok := c.cache[offset] if !ok { location = time.FixedZone("", offset) c.cache[offset] = location } return location } var infinityTsEnabled = false var infinityTsNegative time.Time var infinityTsPositive time.Time const ( infinityTsEnabledAlready = "pq: infinity timestamp enabled already" infinityTsNegativeMustBeSmaller = "pq: infinity timestamp: negative value must be smaller (before) than positive" ) // EnableInfinityTs controls the handling of Postgres' "-infinity" and // "infinity" "timestamp"s. // // If EnableInfinityTs is not called, "-infinity" and "infinity" will return // []byte("-infinity") and []byte("infinity") respectively, and potentially // cause error "sql: Scan error on column index 0: unsupported driver -> Scan // pair: []uint8 -> *time.Time", when scanning into a time.Time value. // // Once EnableInfinityTs has been called, all connections created using this // driver will decode Postgres' "-infinity" and "infinity" for "timestamp", // "timestamp with time zone" and "date" types to the predefined minimum and // maximum times, respectively. When encoding time.Time values, any time which // equals or precedes the predefined minimum time will be encoded to // "-infinity". Any values at or past the maximum time will similarly be // encoded to "infinity". // // If EnableInfinityTs is called with negative >= positive, it will panic. // Calling EnableInfinityTs after a connection has been established results in // undefined behavior. If EnableInfinityTs is called more than once, it will // panic. 
func EnableInfinityTs(negative time.Time, positive time.Time) { if infinityTsEnabled { panic(infinityTsEnabledAlready) } if !negative.Before(positive) { panic(infinityTsNegativeMustBeSmaller) } infinityTsEnabled = true infinityTsNegative = negative infinityTsPositive = positive } /* * Testing might want to toggle infinityTsEnabled */ func disableInfinityTs() { infinityTsEnabled = false } // This is a time function specific to the Postgres default DateStyle // setting ("ISO, MDY"), the only one we currently support. This // accounts for the discrepancies between the parsing available with // time.Parse and the Postgres date formatting quirks. func parseTs(currentLocation *time.Location, str string) interface{} { switch str { case "-infinity": if infinityTsEnabled { return infinityTsNegative } return []byte(str) case "infinity": if infinityTsEnabled { return infinityTsPositive } return []byte(str) } t, err := ParseTimestamp(currentLocation, str) if err != nil { panic(err) } return t } // ParseTimestamp parses Postgres' text format. It returns a time.Time in // currentLocation iff that time's offset agrees with the offset sent from the // Postgres server. Otherwise, ParseTimestamp returns a time.Time with the // fixed offset offset provided by the Postgres server. func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, error) { p := timestampParser{} monSep := strings.IndexRune(str, '-') // this is Gregorian year, not ISO Year // In Gregorian system, the year 1 BC is followed by AD 1 year := p.mustAtoi(str, 0, monSep) daySep := monSep + 3 month := p.mustAtoi(str, monSep+1, daySep) p.expect(str, '-', daySep) timeSep := daySep + 3 day := p.mustAtoi(str, daySep+1, timeSep) minLen := monSep + len("01-01") + 1 isBC := strings.HasSuffix(str, " BC") if isBC { minLen += 3 } var hour, minute, second int if len(str) > minLen { p.expect(str, ' ', timeSep) minSep := timeSep + 3 p.expect(str, ':', minSep) hour = p.mustAtoi(str, timeSep+1, minSep) secSep := minSep + 3 p.expect(str, ':', secSep) minute = p.mustAtoi(str, minSep+1, secSep) secEnd := secSep + 3 second = p.mustAtoi(str, secSep+1, secEnd) } remainderIdx := monSep + len("01-01 00:00:00") + 1 // Three optional (but ordered) sections follow: the // fractional seconds, the time zone offset, and the BC // designation. We set them up here and adjust the other // offsets if the preceding sections exist. nanoSec := 0 tzOff := 0 if remainderIdx < len(str) && str[remainderIdx] == '.' 
{ fracStart := remainderIdx + 1 fracOff := strings.IndexAny(str[fracStart:], "-+Z ") if fracOff < 0 { fracOff = len(str) - fracStart } fracSec := p.mustAtoi(str, fracStart, fracStart+fracOff) nanoSec = fracSec * (1000000000 / int(math.Pow(10, float64(fracOff)))) remainderIdx += fracOff + 1 } if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart] == '-' || str[tzStart] == '+') { // time zone separator is always '-' or '+' or 'Z' (UTC is +00) var tzSign int switch c := str[tzStart]; c { case '-': tzSign = -1 case '+': tzSign = +1 default: return time.Time{}, fmt.Errorf("expected '-' or '+' at position %v; got %v", tzStart, c) } tzHours := p.mustAtoi(str, tzStart+1, tzStart+3) remainderIdx += 3 var tzMin, tzSec int if remainderIdx < len(str) && str[remainderIdx] == ':' { tzMin = p.mustAtoi(str, remainderIdx+1, remainderIdx+3) remainderIdx += 3 } if remainderIdx < len(str) && str[remainderIdx] == ':' { tzSec = p.mustAtoi(str, remainderIdx+1, remainderIdx+3) remainderIdx += 3 } tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec) } else if tzStart < len(str) && str[tzStart] == 'Z' { // time zone Z separator indicates UTC is +00 remainderIdx += 1 } var isoYear int if isBC { isoYear = 1 - year remainderIdx += 3 } else { isoYear = year } if remainderIdx < len(str) { return time.Time{}, fmt.Errorf("expected end of input, got %v", str[remainderIdx:]) } t := time.Date(isoYear, time.Month(month), day, hour, minute, second, nanoSec, globalLocationCache.getLocation(tzOff)) if currentLocation != nil { // Set the location of the returned Time based on the session's // TimeZone value, but only if the local time zone database agrees with // the remote database on the offset. lt := t.In(currentLocation) _, newOff := lt.Zone() if newOff == tzOff { t = lt } } return t, p.err } // formatTs formats t into a format postgres understands. func formatTs(t time.Time) []byte { if infinityTsEnabled { // t <= -infinity : ! (t > -infinity) if !t.After(infinityTsNegative) { return []byte("-infinity") } // t >= infinity : ! (!t < infinity) if !t.Before(infinityTsPositive) { return []byte("infinity") } } return FormatTimestamp(t) } // FormatTimestamp formats t into Postgres' text format for timestamps. func FormatTimestamp(t time.Time) []byte { // Need to send dates before 0001 A.D. with " BC" suffix, instead of the // minus sign preferred by Go. // Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on bc := false if t.Year() <= 0 { // flip year sign, and add 1, e.g: "0" will be "1", and "-10" will be "11" t = t.AddDate((-t.Year())*2+1, 0, 0) bc = true } b := []byte(t.Format("2006-01-02 15:04:05.999999999Z07:00")) _, offset := t.Zone() offset %= 60 if offset != 0 { // RFC3339Nano already printed the minus sign if offset < 0 { offset = -offset } b = append(b, ':') if offset < 10 { b = append(b, '0') } b = strconv.AppendInt(b, int64(offset), 10) } if bc { b = append(b, " BC"...) } return b } // Parse a bytea value received from the server. Both "hex" and the legacy // "escape" format are supported. 
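//
// A hedged illustration of the two accepted inputs for the bytes {0xDE, 0xAD}:
//
//	hexForm, _ := parseBytea([]byte(`\xdead`))   // server >= 9.0, bytea_output = hex
//	escForm, _ := parseBytea([]byte(`\336\255`)) // legacy escape format (octal escapes)
//	// both yield []byte{0xde, 0xad}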
func parseBytea(s []byte) (result []byte, err error) { if len(s) >= 2 && bytes.Equal(s[:2], []byte("\\x")) { // bytea_output = hex s = s[2:] // trim off leading "\\x" result = make([]byte, hex.DecodedLen(len(s))) _, err := hex.Decode(result, s) if err != nil { return nil, err } } else { // bytea_output = escape for len(s) > 0 { if s[0] == '\\' { // escaped '\\' if len(s) >= 2 && s[1] == '\\' { result = append(result, '\\') s = s[2:] continue } // '\\' followed by an octal number if len(s) < 4 { return nil, fmt.Errorf("invalid bytea sequence %v", s) } r, err := strconv.ParseUint(string(s[1:4]), 8, 8) if err != nil { return nil, fmt.Errorf("could not parse bytea value: %s", err.Error()) } result = append(result, byte(r)) s = s[4:] } else { // We hit an unescaped, raw byte. Try to read in as many as // possible in one go. i := bytes.IndexByte(s, '\\') if i == -1 { result = append(result, s...) break } result = append(result, s[:i]...) s = s[i:] } } } return result, nil } func encodeBytea(serverVersion int, v []byte) (result []byte) { if serverVersion >= 90000 { // Use the hex format if we know that the server supports it result = make([]byte, 2+hex.EncodedLen(len(v))) result[0] = '\\' result[1] = 'x' hex.Encode(result[2:], v) } else { // .. or resort to "escape" for _, b := range v { if b == '\\' { result = append(result, '\\', '\\') } else if b < 0x20 || b > 0x7e { result = append(result, []byte(fmt.Sprintf("\\%03o", b))...) } else { result = append(result, b) } } } return result } // NullTime represents a time.Time that may be null. NullTime implements the // sql.Scanner interface so it can be used as a scan destination, similar to // sql.NullString. type NullTime struct { Time time.Time Valid bool // Valid is true if Time is not NULL } // Scan implements the Scanner interface. func (nt *NullTime) Scan(value interface{}) error { nt.Time, nt.Valid = value.(time.Time) return nil } // Value implements the driver Valuer interface. func (nt NullTime) Value() (driver.Value, error) { if !nt.Valid { return nil, nil } return nt.Time, nil } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/array.go0000644000000000000000000005036115024302467021401 0ustar rootrootpackage pq import ( "bytes" "database/sql" "database/sql/driver" "encoding/hex" "fmt" "reflect" "strconv" "strings" ) var typeByteSlice = reflect.TypeOf([]byte{}) var typeDriverValuer = reflect.TypeOf((*driver.Valuer)(nil)).Elem() var typeSQLScanner = reflect.TypeOf((*sql.Scanner)(nil)).Elem() // Array returns the optimal driver.Valuer and sql.Scanner for an array or // slice of any dimension. // // For example: // db.Query(`SELECT * FROM t WHERE id = ANY($1)`, pq.Array([]int{235, 401})) // // var x []sql.NullInt64 // db.QueryRow(`SELECT ARRAY[235, 401]`).Scan(pq.Array(&x)) // // Scanning multi-dimensional arrays is not supported. Arrays where the lower // bound is not one (such as `[0:0]={1}') are not supported. 
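//
// As a further hedged sketch, a text[] column can be scanned into a plain
// []string the same way:
//
//	var tags []string
//	err := db.QueryRow(`SELECT ARRAY['go','postgres']::text[]`).Scan(pq.Array(&tags))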
func Array(a interface{}) interface { driver.Valuer sql.Scanner } { switch a := a.(type) { case []bool: return (*BoolArray)(&a) case []float64: return (*Float64Array)(&a) case []float32: return (*Float32Array)(&a) case []int64: return (*Int64Array)(&a) case []int32: return (*Int32Array)(&a) case []string: return (*StringArray)(&a) case [][]byte: return (*ByteaArray)(&a) case *[]bool: return (*BoolArray)(a) case *[]float64: return (*Float64Array)(a) case *[]float32: return (*Float32Array)(a) case *[]int64: return (*Int64Array)(a) case *[]int32: return (*Int32Array)(a) case *[]string: return (*StringArray)(a) case *[][]byte: return (*ByteaArray)(a) } return GenericArray{a} } // ArrayDelimiter may be optionally implemented by driver.Valuer or sql.Scanner // to override the array delimiter used by GenericArray. type ArrayDelimiter interface { // ArrayDelimiter returns the delimiter character(s) for this element's type. ArrayDelimiter() string } // BoolArray represents a one-dimensional array of the PostgreSQL boolean type. type BoolArray []bool // Scan implements the sql.Scanner interface. func (a *BoolArray) Scan(src interface{}) error { switch src := src.(type) { case []byte: return a.scanBytes(src) case string: return a.scanBytes([]byte(src)) case nil: *a = nil return nil } return fmt.Errorf("pq: cannot convert %T to BoolArray", src) } func (a *BoolArray) scanBytes(src []byte) error { elems, err := scanLinearArray(src, []byte{','}, "BoolArray") if err != nil { return err } if *a != nil && len(elems) == 0 { *a = (*a)[:0] } else { b := make(BoolArray, len(elems)) for i, v := range elems { if len(v) != 1 { return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v) } switch v[0] { case 't': b[i] = true case 'f': b[i] = false default: return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v) } } *a = b } return nil } // Value implements the driver.Valuer interface. func (a BoolArray) Value() (driver.Value, error) { if a == nil { return nil, nil } if n := len(a); n > 0 { // There will be exactly two curly brackets, N bytes of values, // and N-1 bytes of delimiters. b := make([]byte, 1+2*n) for i := 0; i < n; i++ { b[2*i] = ',' if a[i] { b[1+2*i] = 't' } else { b[1+2*i] = 'f' } } b[0] = '{' b[2*n] = '}' return string(b), nil } return "{}", nil } // ByteaArray represents a one-dimensional array of the PostgreSQL bytea type. type ByteaArray [][]byte // Scan implements the sql.Scanner interface. func (a *ByteaArray) Scan(src interface{}) error { switch src := src.(type) { case []byte: return a.scanBytes(src) case string: return a.scanBytes([]byte(src)) case nil: *a = nil return nil } return fmt.Errorf("pq: cannot convert %T to ByteaArray", src) } func (a *ByteaArray) scanBytes(src []byte) error { elems, err := scanLinearArray(src, []byte{','}, "ByteaArray") if err != nil { return err } if *a != nil && len(elems) == 0 { *a = (*a)[:0] } else { b := make(ByteaArray, len(elems)) for i, v := range elems { b[i], err = parseBytea(v) if err != nil { return fmt.Errorf("could not parse bytea array index %d: %s", i, err.Error()) } } *a = b } return nil } // Value implements the driver.Valuer interface. It uses the "hex" format which // is only supported on PostgreSQL 9.0 or newer. func (a ByteaArray) Value() (driver.Value, error) { if a == nil { return nil, nil } if n := len(a); n > 0 { // There will be at least two curly brackets, 2*N bytes of quotes, // 3*N bytes of hex formatting, and N-1 bytes of delimiters. 
size := 1 + 6*n for _, x := range a { size += hex.EncodedLen(len(x)) } b := make([]byte, size) for i, s := 0, b; i < n; i++ { o := copy(s, `,"\\x`) o += hex.Encode(s[o:], a[i]) s[o] = '"' s = s[o+1:] } b[0] = '{' b[size-1] = '}' return string(b), nil } return "{}", nil } // Float64Array represents a one-dimensional array of the PostgreSQL double // precision type. type Float64Array []float64 // Scan implements the sql.Scanner interface. func (a *Float64Array) Scan(src interface{}) error { switch src := src.(type) { case []byte: return a.scanBytes(src) case string: return a.scanBytes([]byte(src)) case nil: *a = nil return nil } return fmt.Errorf("pq: cannot convert %T to Float64Array", src) } func (a *Float64Array) scanBytes(src []byte) error { elems, err := scanLinearArray(src, []byte{','}, "Float64Array") if err != nil { return err } if *a != nil && len(elems) == 0 { *a = (*a)[:0] } else { b := make(Float64Array, len(elems)) for i, v := range elems { if b[i], err = strconv.ParseFloat(string(v), 64); err != nil { return fmt.Errorf("pq: parsing array element index %d: %v", i, err) } } *a = b } return nil } // Value implements the driver.Valuer interface. func (a Float64Array) Value() (driver.Value, error) { if a == nil { return nil, nil } if n := len(a); n > 0 { // There will be at least two curly brackets, N bytes of values, // and N-1 bytes of delimiters. b := make([]byte, 1, 1+2*n) b[0] = '{' b = strconv.AppendFloat(b, a[0], 'f', -1, 64) for i := 1; i < n; i++ { b = append(b, ',') b = strconv.AppendFloat(b, a[i], 'f', -1, 64) } return string(append(b, '}')), nil } return "{}", nil } // Float32Array represents a one-dimensional array of the PostgreSQL double // precision type. type Float32Array []float32 // Scan implements the sql.Scanner interface. func (a *Float32Array) Scan(src interface{}) error { switch src := src.(type) { case []byte: return a.scanBytes(src) case string: return a.scanBytes([]byte(src)) case nil: *a = nil return nil } return fmt.Errorf("pq: cannot convert %T to Float32Array", src) } func (a *Float32Array) scanBytes(src []byte) error { elems, err := scanLinearArray(src, []byte{','}, "Float32Array") if err != nil { return err } if *a != nil && len(elems) == 0 { *a = (*a)[:0] } else { b := make(Float32Array, len(elems)) for i, v := range elems { var x float64 if x, err = strconv.ParseFloat(string(v), 32); err != nil { return fmt.Errorf("pq: parsing array element index %d: %v", i, err) } b[i] = float32(x) } *a = b } return nil } // Value implements the driver.Valuer interface. func (a Float32Array) Value() (driver.Value, error) { if a == nil { return nil, nil } if n := len(a); n > 0 { // There will be at least two curly brackets, N bytes of values, // and N-1 bytes of delimiters. b := make([]byte, 1, 1+2*n) b[0] = '{' b = strconv.AppendFloat(b, float64(a[0]), 'f', -1, 32) for i := 1; i < n; i++ { b = append(b, ',') b = strconv.AppendFloat(b, float64(a[i]), 'f', -1, 32) } return string(append(b, '}')), nil } return "{}", nil } // GenericArray implements the driver.Valuer and sql.Scanner interfaces for // an array or slice of any dimension. 
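//
// A hedged sketch of where GenericArray helps: element types that implement
// sql.Scanner themselves, such as sql.NullString:
//
//	var names []sql.NullString
//	err := db.QueryRow(`SELECT ARRAY['a', NULL]::text[]`).Scan(pq.GenericArray{A: &names})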
type GenericArray struct{ A interface{} } func (GenericArray) evaluateDestination(rt reflect.Type) (reflect.Type, func([]byte, reflect.Value) error, string) { var assign func([]byte, reflect.Value) error var del = "," // TODO calculate the assign function for other types // TODO repeat this section on the element type of arrays or slices (multidimensional) { if reflect.PtrTo(rt).Implements(typeSQLScanner) { // dest is always addressable because it is an element of a slice. assign = func(src []byte, dest reflect.Value) (err error) { ss := dest.Addr().Interface().(sql.Scanner) if src == nil { err = ss.Scan(nil) } else { err = ss.Scan(src) } return } goto FoundType } assign = func([]byte, reflect.Value) error { return fmt.Errorf("pq: scanning to %s is not implemented; only sql.Scanner", rt) } } FoundType: if ad, ok := reflect.Zero(rt).Interface().(ArrayDelimiter); ok { del = ad.ArrayDelimiter() } return rt, assign, del } // Scan implements the sql.Scanner interface. func (a GenericArray) Scan(src interface{}) error { dpv := reflect.ValueOf(a.A) switch { case dpv.Kind() != reflect.Ptr: return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A) case dpv.IsNil(): return fmt.Errorf("pq: destination %T is nil", a.A) } dv := dpv.Elem() switch dv.Kind() { case reflect.Slice: case reflect.Array: default: return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A) } switch src := src.(type) { case []byte: return a.scanBytes(src, dv) case string: return a.scanBytes([]byte(src), dv) case nil: if dv.Kind() == reflect.Slice { dv.Set(reflect.Zero(dv.Type())) return nil } } return fmt.Errorf("pq: cannot convert %T to %s", src, dv.Type()) } func (a GenericArray) scanBytes(src []byte, dv reflect.Value) error { dtype, assign, del := a.evaluateDestination(dv.Type().Elem()) dims, elems, err := parseArray(src, []byte(del)) if err != nil { return err } // TODO allow multidimensional if len(dims) > 1 { return fmt.Errorf("pq: scanning from multidimensional ARRAY%s is not implemented", strings.Replace(fmt.Sprint(dims), " ", "][", -1)) } // Treat a zero-dimensional array like an array with a single dimension of zero. if len(dims) == 0 { dims = append(dims, 0) } for i, rt := 0, dv.Type(); i < len(dims); i, rt = i+1, rt.Elem() { switch rt.Kind() { case reflect.Slice: case reflect.Array: if rt.Len() != dims[i] { return fmt.Errorf("pq: cannot convert ARRAY%s to %s", strings.Replace(fmt.Sprint(dims), " ", "][", -1), dv.Type()) } default: // TODO handle multidimensional } } values := reflect.MakeSlice(reflect.SliceOf(dtype), len(elems), len(elems)) for i, e := range elems { if err := assign(e, values.Index(i)); err != nil { return fmt.Errorf("pq: parsing array element index %d: %v", i, err) } } // TODO handle multidimensional switch dv.Kind() { case reflect.Slice: dv.Set(values.Slice(0, dims[0])) case reflect.Array: for i := 0; i < dims[0]; i++ { dv.Index(i).Set(values.Index(i)) } } return nil } // Value implements the driver.Valuer interface. func (a GenericArray) Value() (driver.Value, error) { if a.A == nil { return nil, nil } rv := reflect.ValueOf(a.A) switch rv.Kind() { case reflect.Slice: if rv.IsNil() { return nil, nil } case reflect.Array: default: return nil, fmt.Errorf("pq: Unable to convert %T to array", a.A) } if n := rv.Len(); n > 0 { // There will be at least two curly brackets, N bytes of values, // and N-1 bytes of delimiters. 
b := make([]byte, 0, 1+2*n) b, _, err := appendArray(b, rv, n) return string(b), err } return "{}", nil } // Int64Array represents a one-dimensional array of the PostgreSQL integer types. type Int64Array []int64 // Scan implements the sql.Scanner interface. func (a *Int64Array) Scan(src interface{}) error { switch src := src.(type) { case []byte: return a.scanBytes(src) case string: return a.scanBytes([]byte(src)) case nil: *a = nil return nil } return fmt.Errorf("pq: cannot convert %T to Int64Array", src) } func (a *Int64Array) scanBytes(src []byte) error { elems, err := scanLinearArray(src, []byte{','}, "Int64Array") if err != nil { return err } if *a != nil && len(elems) == 0 { *a = (*a)[:0] } else { b := make(Int64Array, len(elems)) for i, v := range elems { if b[i], err = strconv.ParseInt(string(v), 10, 64); err != nil { return fmt.Errorf("pq: parsing array element index %d: %v", i, err) } } *a = b } return nil } // Value implements the driver.Valuer interface. func (a Int64Array) Value() (driver.Value, error) { if a == nil { return nil, nil } if n := len(a); n > 0 { // There will be at least two curly brackets, N bytes of values, // and N-1 bytes of delimiters. b := make([]byte, 1, 1+2*n) b[0] = '{' b = strconv.AppendInt(b, a[0], 10) for i := 1; i < n; i++ { b = append(b, ',') b = strconv.AppendInt(b, a[i], 10) } return string(append(b, '}')), nil } return "{}", nil } // Int32Array represents a one-dimensional array of the PostgreSQL integer types. type Int32Array []int32 // Scan implements the sql.Scanner interface. func (a *Int32Array) Scan(src interface{}) error { switch src := src.(type) { case []byte: return a.scanBytes(src) case string: return a.scanBytes([]byte(src)) case nil: *a = nil return nil } return fmt.Errorf("pq: cannot convert %T to Int32Array", src) } func (a *Int32Array) scanBytes(src []byte) error { elems, err := scanLinearArray(src, []byte{','}, "Int32Array") if err != nil { return err } if *a != nil && len(elems) == 0 { *a = (*a)[:0] } else { b := make(Int32Array, len(elems)) for i, v := range elems { x, err := strconv.ParseInt(string(v), 10, 32) if err != nil { return fmt.Errorf("pq: parsing array element index %d: %v", i, err) } b[i] = int32(x) } *a = b } return nil } // Value implements the driver.Valuer interface. func (a Int32Array) Value() (driver.Value, error) { if a == nil { return nil, nil } if n := len(a); n > 0 { // There will be at least two curly brackets, N bytes of values, // and N-1 bytes of delimiters. b := make([]byte, 1, 1+2*n) b[0] = '{' b = strconv.AppendInt(b, int64(a[0]), 10) for i := 1; i < n; i++ { b = append(b, ',') b = strconv.AppendInt(b, int64(a[i]), 10) } return string(append(b, '}')), nil } return "{}", nil } // StringArray represents a one-dimensional array of the PostgreSQL character types. type StringArray []string // Scan implements the sql.Scanner interface. 
func (a *StringArray) Scan(src interface{}) error { switch src := src.(type) { case []byte: return a.scanBytes(src) case string: return a.scanBytes([]byte(src)) case nil: *a = nil return nil } return fmt.Errorf("pq: cannot convert %T to StringArray", src) } func (a *StringArray) scanBytes(src []byte) error { elems, err := scanLinearArray(src, []byte{','}, "StringArray") if err != nil { return err } if *a != nil && len(elems) == 0 { *a = (*a)[:0] } else { b := make(StringArray, len(elems)) for i, v := range elems { if b[i] = string(v); v == nil { return fmt.Errorf("pq: parsing array element index %d: cannot convert nil to string", i) } } *a = b } return nil } // Value implements the driver.Valuer interface. func (a StringArray) Value() (driver.Value, error) { if a == nil { return nil, nil } if n := len(a); n > 0 { // There will be at least two curly brackets, 2*N bytes of quotes, // and N-1 bytes of delimiters. b := make([]byte, 1, 1+3*n) b[0] = '{' b = appendArrayQuotedBytes(b, []byte(a[0])) for i := 1; i < n; i++ { b = append(b, ',') b = appendArrayQuotedBytes(b, []byte(a[i])) } return string(append(b, '}')), nil } return "{}", nil } // appendArray appends rv to the buffer, returning the extended buffer and // the delimiter used between elements. // // It panics when n <= 0 or rv's Kind is not reflect.Array nor reflect.Slice. func appendArray(b []byte, rv reflect.Value, n int) ([]byte, string, error) { var del string var err error b = append(b, '{') if b, del, err = appendArrayElement(b, rv.Index(0)); err != nil { return b, del, err } for i := 1; i < n; i++ { b = append(b, del...) if b, del, err = appendArrayElement(b, rv.Index(i)); err != nil { return b, del, err } } return append(b, '}'), del, nil } // appendArrayElement appends rv to the buffer, returning the extended buffer // and the delimiter to use before the next element. // // When rv's Kind is neither reflect.Array nor reflect.Slice, it is converted // using driver.DefaultParameterConverter and the resulting []byte or string // is double-quoted. // // See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO func appendArrayElement(b []byte, rv reflect.Value) ([]byte, string, error) { if k := rv.Kind(); k == reflect.Array || k == reflect.Slice { if t := rv.Type(); t != typeByteSlice && !t.Implements(typeDriverValuer) { if n := rv.Len(); n > 0 { return appendArray(b, rv, n) } return b, "", nil } } var del = "," var err error var iv interface{} = rv.Interface() if ad, ok := iv.(ArrayDelimiter); ok { del = ad.ArrayDelimiter() } if iv, err = driver.DefaultParameterConverter.ConvertValue(iv); err != nil { return b, del, err } switch v := iv.(type) { case nil: return append(b, "NULL"...), del, nil case []byte: return appendArrayQuotedBytes(b, v), del, nil case string: return appendArrayQuotedBytes(b, []byte(v)), del, nil } b, err = appendValue(b, iv) return b, del, err } func appendArrayQuotedBytes(b, v []byte) []byte { b = append(b, '"') for { i := bytes.IndexAny(v, `"\`) if i < 0 { b = append(b, v...) break } if i > 0 { b = append(b, v[:i]...) } b = append(b, '\\', v[i]) v = v[i+1:] } return append(b, '"') } func appendValue(b []byte, v driver.Value) ([]byte, error) { return append(b, encode(nil, v, 0)...), nil } // parseArray extracts the dimensions and elements of an array represented in // text format. Only representations emitted by the backend are supported. // Notably, whitespace around brackets and delimiters is significant, and NULL // is case-sensitive. 
// // See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO func parseArray(src, del []byte) (dims []int, elems [][]byte, err error) { var depth, i int if len(src) < 1 || src[0] != '{' { return nil, nil, fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '{', 0) } Open: for i < len(src) { switch src[i] { case '{': depth++ i++ case '}': elems = make([][]byte, 0) goto Close default: break Open } } dims = make([]int, i) Element: for i < len(src) { switch src[i] { case '{': if depth == len(dims) { break Element } depth++ dims[depth-1] = 0 i++ case '"': var elem = []byte{} var escape bool for i++; i < len(src); i++ { if escape { elem = append(elem, src[i]) escape = false } else { switch src[i] { default: elem = append(elem, src[i]) case '\\': escape = true case '"': elems = append(elems, elem) i++ break Element } } } default: for start := i; i < len(src); i++ { if bytes.HasPrefix(src[i:], del) || src[i] == '}' { elem := src[start:i] if len(elem) == 0 { return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) } if bytes.Equal(elem, []byte("NULL")) { elem = nil } elems = append(elems, elem) break Element } } } } for i < len(src) { if bytes.HasPrefix(src[i:], del) && depth > 0 { dims[depth-1]++ i += len(del) goto Element } else if src[i] == '}' && depth > 0 { dims[depth-1]++ depth-- i++ } else { return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) } } Close: for i < len(src) { if src[i] == '}' && depth > 0 { depth-- i++ } else { return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) } } if depth > 0 { err = fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '}', i) } if err == nil { for _, d := range dims { if (len(elems) % d) != 0 { err = fmt.Errorf("pq: multidimensional arrays must have elements with matching dimensions") } } } return } func scanLinearArray(src, del []byte, typ string) (elems [][]byte, err error) { dims, elems, err := parseArray(src, del) if err != nil { return nil, err } if len(dims) > 1 { return nil, fmt.Errorf("pq: cannot convert ARRAY%s to %s", strings.Replace(fmt.Sprint(dims), " ", "][", -1), typ) } return elems, err } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/ssl_permissions.go0000644000000000000000000000562415024302467023521 0ustar rootroot//go:build !windows // +build !windows package pq import ( "errors" "os" "syscall" ) const ( rootUserID = uint32(0) // The maximum permissions that a private key file owned by a regular user // is allowed to have. This translates to u=rw. maxUserOwnedKeyPermissions os.FileMode = 0600 // The maximum permissions that a private key file owned by root is allowed // to have. This translates to u=rw,g=r. maxRootOwnedKeyPermissions os.FileMode = 0640 ) var ( errSSLKeyHasUnacceptableUserPermissions = errors.New("permissions for files not owned by root should be u=rw (0600) or less") errSSLKeyHasUnacceptableRootPermissions = errors.New("permissions for root owned files should be u=rw,g=r (0640) or less") ) // sslKeyPermissions checks the permissions on user-supplied ssl key files. // The key file should have very little access. // // libpq does not check key file permissions on Windows. func sslKeyPermissions(sslkey string) error { info, err := os.Stat(sslkey) if err != nil { return err } err = hasCorrectPermissions(info) // return ErrSSLKeyHasWorldPermissions for backwards compatability with // existing code. 
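// In caller terms the checks in this file boil down to: a key file named by
// the sslkey connection parameter must be mode 0600 when owned by the
// connecting user (0640 is additionally tolerated for root-owned keys).
// A minimal sketch of preparing such a key before connecting; the paths and
// host below are purely illustrative:
//
//	if err := os.Chmod("/path/to/client.key", 0600); err != nil {
//		log.Fatal(err)
//	}
//	db, err := sql.Open("postgres",
//		"host=db.example.com sslmode=verify-full "+
//			"sslcert=/path/to/client.crt sslkey=/path/to/client.key sslrootcert=/path/to/root.crt")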
if err == errSSLKeyHasUnacceptableUserPermissions || err == errSSLKeyHasUnacceptableRootPermissions { err = ErrSSLKeyHasWorldPermissions } return err } // hasCorrectPermissions checks the file info (and the unix-specific stat_t // output) to verify that the permissions on the file are correct. // // If the file is owned by the same user the process is running as, // the file should only have 0600 (u=rw). If the file is owned by root, // and the group matches the group that the process is running in, the // permissions cannot be more than 0640 (u=rw,g=r). The file should // never have world permissions. // // Returns an error when the permission check fails. func hasCorrectPermissions(info os.FileInfo) error { // if file's permission matches 0600, allow access. userPermissionMask := (os.FileMode(0777) ^ maxUserOwnedKeyPermissions) // regardless of if we're running as root or not, 0600 is acceptable, // so we return if we match the regular user permission mask. if info.Mode().Perm()&userPermissionMask == 0 { return nil } // We need to pull the Unix file information to get the file's owner. // If we can't access it, there's some sort of operating system level error // and we should fail rather than attempting to use faulty information. sysInfo := info.Sys() if sysInfo == nil { return ErrSSLKeyUnknownOwnership } unixStat, ok := sysInfo.(*syscall.Stat_t) if !ok { return ErrSSLKeyUnknownOwnership } // if the file is owned by root, we allow 0640 (u=rw,g=r) to match what // Postgres does. if unixStat.Uid == rootUserID { rootPermissionMask := (os.FileMode(0777) ^ maxRootOwnedKeyPermissions) if info.Mode().Perm()&rootPermissionMask != 0 { return errSSLKeyHasUnacceptableRootPermissions } return nil } return errSSLKeyHasUnacceptableUserPermissions } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/issues_test.go0000644000000000000000000000674615024302467022645 0ustar rootrootpackage pq import ( "context" "database/sql" "errors" "testing" "time" ) func TestIssue494(t *testing.T) { db := openTestConn(t) defer db.Close() query := `CREATE TEMP TABLE t (i INT PRIMARY KEY)` if _, err := db.Exec(query); err != nil { t.Fatal(err) } txn, err := db.Begin() if err != nil { t.Fatal(err) } if _, err := txn.Prepare(CopyIn("t", "i")); err != nil { t.Fatal(err) } if _, err := txn.Query("SELECT 1"); err == nil { t.Fatal("expected error") } } func TestIssue1046(t *testing.T) { ctxTimeout := time.Second * 2 db := openTestConn(t) defer db.Close() ctx, cancel := context.WithTimeout(context.Background(), ctxTimeout) defer cancel() stmt, err := db.PrepareContext(ctx, `SELECT pg_sleep(10) AS id`) if err != nil { t.Fatal(err) } var d []uint8 err = stmt.QueryRowContext(ctx).Scan(&d) dl, _ := ctx.Deadline() since := time.Since(dl) if since > ctxTimeout { t.Logf("FAIL %s: query returned after context deadline: %v\n", t.Name(), since) t.Fail() } if pgErr := (*Error)(nil); !(errors.As(err, &pgErr) && pgErr.Code == cancelErrorCode) { t.Logf("ctx.Err(): [%T]%+v\n", ctx.Err(), ctx.Err()) t.Logf("got err: [%T] %+v expected errCode: %v", err, err, cancelErrorCode) t.Fail() } } func TestIssue1062(t *testing.T) { db := openTestConn(t) defer db.Close() // Ensure that cancelling a QueryRowContext does not result in an ErrBadConn. 
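// The pattern these regression tests guard, sketched from the caller's side
// (db is an open *sql.DB; "57014" is the Postgres query_canceled code that
// cancelErrorCode refers to elsewhere in this test suite):
//
//	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
//	defer cancel()
//	var out []byte
//	err := db.QueryRowContext(ctx, `SELECT pg_sleep(10)`).Scan(&out)
//	var pgErr *pq.Error
//	if errors.As(err, &pgErr) && pgErr.Code == "57014" {
//		// the server cancelled the statement when the context expired;
//		// the underlying connection stays usable for later queries
//	}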
for i := 0; i < 100; i++ { ctx, cancel := context.WithCancel(context.Background()) go cancel() row := db.QueryRowContext(ctx, "select 1") var v int err := row.Scan(&v) if pgErr := (*Error)(nil); err != nil && err != context.Canceled && !(errors.As(err, &pgErr) && pgErr.Code == cancelErrorCode) { t.Fatalf("Scan resulted in unexpected error %v for canceled QueryRowContext at attempt %d", err, i+1) } } } func connIsValid(t *testing.T, db *sql.DB) { t.Helper() ctx := context.Background() conn, err := db.Conn(ctx) if err != nil { t.Fatal(err) } defer conn.Close() // the connection must be valid err = conn.PingContext(ctx) if err != nil { t.Errorf("PingContext err=%#v", err) } // close must not return an error err = conn.Close() if err != nil { t.Errorf("Close err=%#v", err) } } func TestQueryCancelRace(t *testing.T) { db := openTestConn(t) defer db.Close() // cancel a query while executing on Postgres: must return the cancelled error code ctx, cancel := context.WithCancel(context.Background()) go func() { time.Sleep(10 * time.Millisecond) cancel() }() row := db.QueryRowContext(ctx, "select pg_sleep(0.5)") var pgSleepVoid string err := row.Scan(&pgSleepVoid) if pgErr := (*Error)(nil); !(errors.As(err, &pgErr) && pgErr.Code == cancelErrorCode) { t.Fatalf("expected cancelled error; err=%#v", err) } // get a connection: it must be a valid connIsValid(t, db) } // Test cancelling a scan after it is started. This broke with 1.10.4. func TestQueryCancelledReused(t *testing.T) { db := openTestConn(t) defer db.Close() ctx, cancel := context.WithCancel(context.Background()) // run a query that returns a lot of data rows, err := db.QueryContext(ctx, "select generate_series(1, 10000)") if err != nil { t.Fatal(err) } // scan the first value if !rows.Next() { t.Error("expected rows.Next() to return true") } var i int err = rows.Scan(&i) if err != nil { t.Fatal(err) } if i != 1 { t.Error(i) } // cancel the context and close rows, ignoring errors cancel() rows.Close() // get a connection: it must be valid connIsValid(t, db) } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/url_test.go0000644000000000000000000000270515024302467022123 0ustar rootrootpackage pq import ( "testing" ) func TestSimpleParseURL(t *testing.T) { expected := "host='hostname.remote'" str, err := ParseURL("postgres://hostname.remote") if err != nil { t.Fatal(err) } if str != expected { t.Fatalf("unexpected result from ParseURL:\n+ %v\n- %v", str, expected) } } func TestIPv6LoopbackParseURL(t *testing.T) { expected := "host='::1' port='1234'" str, err := ParseURL("postgres://[::1]:1234") if err != nil { t.Fatal(err) } if str != expected { t.Fatalf("unexpected result from ParseURL:\n+ %v\n- %v", str, expected) } } func TestFullParseURL(t *testing.T) { expected := `dbname='database' host='hostname.remote' password='top secret' port='1234' user='username'` str, err := ParseURL("postgres://username:top%20secret@hostname.remote:1234/database") if err != nil { t.Fatal(err) } if str != expected { t.Fatalf("unexpected result from ParseURL:\n+ %s\n- %s", str, expected) } } func TestInvalidProtocolParseURL(t *testing.T) { _, err := ParseURL("http://hostname.remote") switch err { case nil: t.Fatal("Expected an error from parsing invalid protocol") default: msg := "invalid connection protocol: http" if err.Error() != msg { t.Fatalf("Unexpected error message:\n+ %s\n- %s", err.Error(), msg) } } } func TestMinimalURL(t *testing.T) { cs, err := ParseURL("postgres://") if err != nil { t.Fatal(err) } if cs != "" { t.Fatalf("expected blank connection string, 
got: %q", cs) } } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/user_windows.go0000644000000000000000000000154715024302467023015 0ustar rootroot// Package pq is a pure Go Postgres driver for the database/sql package. package pq import ( "path/filepath" "syscall" ) // Perform Windows user name lookup identically to libpq. // // The PostgreSQL code makes use of the legacy Win32 function // GetUserName, and that function has not been imported into stock Go. // GetUserNameEx is available though, the difference being that a // wider range of names are available. To get the output to be the // same as GetUserName, only the base (or last) component of the // result is returned. func userCurrent() (string, error) { pw_name := make([]uint16, 128) pwname_size := uint32(len(pw_name)) - 1 err := syscall.GetUserNameEx(syscall.NameSamCompatible, &pw_name[0], &pwname_size) if err != nil { return "", ErrCouldNotDetectUsername } s := syscall.UTF16ToString(pw_name) u := filepath.Base(s) return u, nil } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/url.go0000644000000000000000000000316715024302467021067 0ustar rootrootpackage pq import ( "fmt" "net" nurl "net/url" "sort" "strings" ) // ParseURL no longer needs to be used by clients of this library since supplying a URL as a // connection string to sql.Open() is now supported: // // sql.Open("postgres", "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full") // // It remains exported here for backwards-compatibility. // // ParseURL converts a url to a connection string for driver.Open. // Example: // // "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full" // // converts to: // // "user=bob password=secret host=1.2.3.4 port=5432 dbname=mydb sslmode=verify-full" // // A minimal example: // // "postgres://" // // This will be blank, causing driver.Open to use all of the defaults func ParseURL(url string) (string, error) { u, err := nurl.Parse(url) if err != nil { return "", err } if u.Scheme != "postgres" && u.Scheme != "postgresql" { return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme) } var kvs []string escaper := strings.NewReplacer(`'`, `\'`, `\`, `\\`) accrue := func(k, v string) { if v != "" { kvs = append(kvs, k+"='"+escaper.Replace(v)+"'") } } if u.User != nil { v := u.User.Username() accrue("user", v) v, _ = u.User.Password() accrue("password", v) } if host, port, err := net.SplitHostPort(u.Host); err != nil { accrue("host", u.Host) } else { accrue("host", host) accrue("port", port) } if u.Path != "" { accrue("dbname", u.Path[1:]) } q := u.Query() for k := range q { accrue(k, q.Get(k)) } sort.Strings(kvs) // Makes testing easier (not a performance concern) return strings.Join(kvs, " "), nil } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/notify_test.go0000644000000000000000000003023215024302467022625 0ustar rootrootpackage pq import ( "database/sql" "database/sql/driver" "errors" "fmt" "io" "os" "runtime" "sync" "testing" "time" ) var errNilNotification = errors.New("nil notification") func expectNotification(t *testing.T, ch <-chan *Notification, relname string, extra string) error { select { case n := <-ch: if n == nil { return errNilNotification } if n.Channel != relname || n.Extra != extra { return fmt.Errorf("unexpected notification %v", n) } return nil case <-time.After(1500 * time.Millisecond): return fmt.Errorf("timeout") } } func expectNoNotification(t *testing.T, ch <-chan *Notification) error { select { case n := <-ch: return fmt.Errorf("unexpected notification %v", n) case <-time.After(100 * 
time.Millisecond): return nil } } func expectEvent(t *testing.T, eventch <-chan ListenerEventType, et ListenerEventType) error { select { case e := <-eventch: if e != et { return fmt.Errorf("unexpected event %v", e) } return nil case <-time.After(1500 * time.Millisecond): panic("expectEvent timeout") } } func expectNoEvent(t *testing.T, eventch <-chan ListenerEventType) error { select { case e := <-eventch: return fmt.Errorf("unexpected event %v", e) case <-time.After(100 * time.Millisecond): return nil } } func newTestListenerConn(t *testing.T) (*ListenerConn, <-chan *Notification) { datname := os.Getenv("PGDATABASE") sslmode := os.Getenv("PGSSLMODE") if datname == "" { os.Setenv("PGDATABASE", "pqgotest") } if sslmode == "" { os.Setenv("PGSSLMODE", "disable") } notificationChan := make(chan *Notification) l, err := NewListenerConn("", notificationChan) if err != nil { t.Fatal(err) } return l, notificationChan } func TestNewListenerConn(t *testing.T) { l, _ := newTestListenerConn(t) defer l.Close() } func TestConnListen(t *testing.T) { l, channel := newTestListenerConn(t) defer l.Close() db := openTestConn(t) defer db.Close() ok, err := l.Listen("notify_test") if !ok || err != nil { t.Fatal(err) } _, err = db.Exec("NOTIFY notify_test") if err != nil { t.Fatal(err) } err = expectNotification(t, channel, "notify_test", "") if err != nil { t.Fatal(err) } } func TestConnUnlisten(t *testing.T) { l, channel := newTestListenerConn(t) defer l.Close() db := openTestConn(t) defer db.Close() ok, err := l.Listen("notify_test") if !ok || err != nil { t.Fatal(err) } _, err = db.Exec("NOTIFY notify_test") if err != nil { t.Fatal(err) } err = expectNotification(t, channel, "notify_test", "") if err != nil { t.Fatal(err) } ok, err = l.Unlisten("notify_test") if !ok || err != nil { t.Fatal(err) } _, err = db.Exec("NOTIFY notify_test") if err != nil { t.Fatal(err) } err = expectNoNotification(t, channel) if err != nil { t.Fatal(err) } } func TestConnUnlistenAll(t *testing.T) { l, channel := newTestListenerConn(t) defer l.Close() db := openTestConn(t) defer db.Close() ok, err := l.Listen("notify_test") if !ok || err != nil { t.Fatal(err) } _, err = db.Exec("NOTIFY notify_test") if err != nil { t.Fatal(err) } err = expectNotification(t, channel, "notify_test", "") if err != nil { t.Fatal(err) } ok, err = l.UnlistenAll() if !ok || err != nil { t.Fatal(err) } _, err = db.Exec("NOTIFY notify_test") if err != nil { t.Fatal(err) } err = expectNoNotification(t, channel) if err != nil { t.Fatal(err) } } func TestConnClose(t *testing.T) { l, _ := newTestListenerConn(t) defer l.Close() err := l.Close() if err != nil { t.Fatal(err) } err = l.Close() if err != errListenerConnClosed { t.Fatalf("expected errListenerConnClosed; got %v", err) } } func TestConnPing(t *testing.T) { l, _ := newTestListenerConn(t) defer l.Close() err := l.Ping() if err != nil { t.Fatal(err) } err = l.Close() if err != nil { t.Fatal(err) } err = l.Ping() if err != errListenerConnClosed { t.Fatalf("expected errListenerConnClosed; got %v", err) } } // Test for deadlock where a query fails while another one is queued func TestConnExecDeadlock(t *testing.T) { l, _ := newTestListenerConn(t) defer l.Close() var wg sync.WaitGroup wg.Add(2) go func() { l.ExecSimpleQuery("SELECT pg_sleep(60)") wg.Done() }() runtime.Gosched() go func() { l.ExecSimpleQuery("SELECT 1") wg.Done() }() // give the two goroutines some time to get into position runtime.Gosched() // calls Close on the net.Conn; equivalent to a network failure l.Close() defer 
time.AfterFunc(10*time.Second, func() { panic("timed out") }).Stop() wg.Wait() } // Test for ListenerConn being closed while a slow query is executing func TestListenerConnCloseWhileQueryIsExecuting(t *testing.T) { l, _ := newTestListenerConn(t) defer l.Close() var wg sync.WaitGroup wg.Add(1) go func() { sent, err := l.ExecSimpleQuery("SELECT pg_sleep(60)") if sent { panic("expected sent=false") } // could be any of a number of errors if err == nil { panic("expected error") } wg.Done() }() // give the above goroutine some time to get into position runtime.Gosched() err := l.Close() if err != nil { t.Fatal(err) } defer time.AfterFunc(10*time.Second, func() { panic("timed out") }).Stop() wg.Wait() } func TestNotifyExtra(t *testing.T) { db := openTestConn(t) defer db.Close() if getServerVersion(t, db) < 90000 { t.Skip("skipping NOTIFY payload test since the server does not appear to support it") } l, channel := newTestListenerConn(t) defer l.Close() ok, err := l.Listen("notify_test") if !ok || err != nil { t.Fatal(err) } _, err = db.Exec("NOTIFY notify_test, 'something'") if err != nil { t.Fatal(err) } err = expectNotification(t, channel, "notify_test", "something") if err != nil { t.Fatal(err) } } // create a new test listener and also set the timeouts func newTestListenerTimeout(t *testing.T, min time.Duration, max time.Duration) (*Listener, <-chan ListenerEventType) { datname := os.Getenv("PGDATABASE") sslmode := os.Getenv("PGSSLMODE") if datname == "" { os.Setenv("PGDATABASE", "pqgotest") } if sslmode == "" { os.Setenv("PGSSLMODE", "disable") } eventch := make(chan ListenerEventType, 16) l := NewListener("", min, max, func(t ListenerEventType, err error) { eventch <- t }) err := expectEvent(t, eventch, ListenerEventConnected) if err != nil { t.Fatal(err) } return l, eventch } func newTestListener(t *testing.T) (*Listener, <-chan ListenerEventType) { return newTestListenerTimeout(t, time.Hour, time.Hour) } func TestListenerListen(t *testing.T) { l, _ := newTestListener(t) defer l.Close() db := openTestConn(t) defer db.Close() err := l.Listen("notify_listen_test") if err != nil { t.Fatal(err) } _, err = db.Exec("NOTIFY notify_listen_test") if err != nil { t.Fatal(err) } err = expectNotification(t, l.Notify, "notify_listen_test", "") if err != nil { t.Fatal(err) } } func TestListenerUnlisten(t *testing.T) { l, _ := newTestListener(t) defer l.Close() db := openTestConn(t) defer db.Close() err := l.Listen("notify_listen_test") if err != nil { t.Fatal(err) } _, err = db.Exec("NOTIFY notify_listen_test") if err != nil { t.Fatal(err) } err = l.Unlisten("notify_listen_test") if err != nil { t.Fatal(err) } err = expectNotification(t, l.Notify, "notify_listen_test", "") if err != nil { t.Fatal(err) } _, err = db.Exec("NOTIFY notify_listen_test") if err != nil { t.Fatal(err) } err = expectNoNotification(t, l.Notify) if err != nil { t.Fatal(err) } } func TestListenerUnlistenAll(t *testing.T) { l, _ := newTestListener(t) defer l.Close() db := openTestConn(t) defer db.Close() err := l.Listen("notify_listen_test") if err != nil { t.Fatal(err) } _, err = db.Exec("NOTIFY notify_listen_test") if err != nil { t.Fatal(err) } err = l.UnlistenAll() if err != nil { t.Fatal(err) } err = expectNotification(t, l.Notify, "notify_listen_test", "") if err != nil { t.Fatal(err) } _, err = db.Exec("NOTIFY notify_listen_test") if err != nil { t.Fatal(err) } err = expectNoNotification(t, l.Notify) if err != nil { t.Fatal(err) } } func TestListenerFailedQuery(t *testing.T) { l, eventch := newTestListener(t) defer l.Close() 
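// The high-level Listener round trip exercised by the tests above, sketched
// outside the test harness (the channel name and payload are illustrative;
// a nil event callback is permitted):
//
//	l := pq.NewListener("", 10*time.Second, time.Minute, nil)
//	if err := l.Listen("jobs"); err != nil {
//		log.Fatal(err)
//	}
//	if _, err := db.Exec(`NOTIFY jobs, 'payload'`); err != nil {
//		log.Fatal(err)
//	}
//	if n := <-l.Notify; n != nil {
//		fmt.Println(n.Channel, n.Extra) // "jobs" "payload"
//	}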
db := openTestConn(t) defer db.Close() err := l.Listen("notify_listen_test") if err != nil { t.Fatal(err) } _, err = db.Exec("NOTIFY notify_listen_test") if err != nil { t.Fatal(err) } err = expectNotification(t, l.Notify, "notify_listen_test", "") if err != nil { t.Fatal(err) } // shouldn't cause a disconnect ok, err := l.cn.ExecSimpleQuery("SELECT error") if !ok { t.Fatalf("could not send query to server: %v", err) } _, ok = err.(PGError) if !ok { t.Fatalf("unexpected error %v", err) } err = expectNoEvent(t, eventch) if err != nil { t.Fatal(err) } // should still work _, err = db.Exec("NOTIFY notify_listen_test") if err != nil { t.Fatal(err) } err = expectNotification(t, l.Notify, "notify_listen_test", "") if err != nil { t.Fatal(err) } } func TestListenerReconnect(t *testing.T) { l, eventch := newTestListenerTimeout(t, 20*time.Millisecond, time.Hour) defer l.Close() db := openTestConn(t) defer db.Close() err := l.Listen("notify_listen_test") if err != nil { t.Fatal(err) } _, err = db.Exec("NOTIFY notify_listen_test") if err != nil { t.Fatal(err) } err = expectNotification(t, l.Notify, "notify_listen_test", "") if err != nil { t.Fatal(err) } // kill the connection and make sure it comes back up ok, err := l.cn.ExecSimpleQuery("SELECT pg_terminate_backend(pg_backend_pid())") if ok { t.Fatalf("could not kill the connection: %v", err) } if err != io.EOF { t.Fatalf("unexpected error %v", err) } err = expectEvent(t, eventch, ListenerEventDisconnected) if err != nil { t.Fatal(err) } err = expectEvent(t, eventch, ListenerEventReconnected) if err != nil { t.Fatal(err) } // should still work _, err = db.Exec("NOTIFY notify_listen_test") if err != nil { t.Fatal(err) } // should get nil after Reconnected err = expectNotification(t, l.Notify, "", "") if err != errNilNotification { t.Fatal(err) } err = expectNotification(t, l.Notify, "notify_listen_test", "") if err != nil { t.Fatal(err) } } func TestListenerClose(t *testing.T) { l, _ := newTestListenerTimeout(t, 20*time.Millisecond, time.Hour) defer l.Close() err := l.Close() if err != nil { t.Fatal(err) } err = l.Close() if err != errListenerClosed { t.Fatalf("expected errListenerClosed; got %v", err) } } func TestListenerPing(t *testing.T) { l, _ := newTestListenerTimeout(t, 20*time.Millisecond, time.Hour) defer l.Close() err := l.Ping() if err != nil { t.Fatal(err) } err = l.Close() if err != nil { t.Fatal(err) } err = l.Ping() if err != errListenerClosed { t.Fatalf("expected errListenerClosed; got %v", err) } } func TestConnectorWithNotificationHandler_Simple(t *testing.T) { b, err := NewConnector("") if err != nil { t.Fatal(err) } var notification *Notification // Make connector w/ handler to set the local var c := ConnectorWithNotificationHandler(b, func(n *Notification) { notification = n }) sendNotification(c, t, "Test notification #1") if notification == nil || notification.Extra != "Test notification #1" { t.Fatalf("Expected notification w/ message, got %v", notification) } // Unset the handler on the same connector prevC := c if c = ConnectorWithNotificationHandler(c, nil); c != prevC { t.Fatalf("Expected to not create new connector but did") } sendNotification(c, t, "Test notification #2") if notification == nil || notification.Extra != "Test notification #1" { t.Fatalf("Expected notification to not change, got %v", notification) } // Set it back on the same connector if c = ConnectorWithNotificationHandler(c, func(n *Notification) { notification = n }); c != prevC { t.Fatal("Expected to not create new connector but did") } 
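// Equivalent caller-side use of the connector-based notification handler
// being tested here; the DSN and log call are purely illustrative:
//
//	base, err := pq.NewConnector("dbname=app sslmode=disable")
//	if err != nil {
//		log.Fatal(err)
//	}
//	c := pq.ConnectorWithNotificationHandler(base, func(n *pq.Notification) {
//		log.Printf("notification on %q: %s", n.Channel, n.Extra)
//	})
//	db := sql.OpenDB(c)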
sendNotification(c, t, "Test notification #3") if notification == nil || notification.Extra != "Test notification #3" { t.Fatalf("Expected notification w/ message, got %v", notification) } } func sendNotification(c driver.Connector, t *testing.T, escapedNotification string) { db := sql.OpenDB(c) defer db.Close() sql := fmt.Sprintf("LISTEN foo; NOTIFY foo, '%s';", escapedNotification) if _, err := db.Exec(sql); err != nil { t.Fatal(err) } } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/example/0000755000000000000000000000000015024302467021362 5ustar rootrootdependencies/pkg/mod/github.com/lib/pq@v1.10.9/example/listen/0000755000000000000000000000000015024302467022660 5ustar rootrootdependencies/pkg/mod/github.com/lib/pq@v1.10.9/example/listen/doc.go0000644000000000000000000000510115024302467023751 0ustar rootroot/* Package listen is a self-contained Go program which uses the LISTEN / NOTIFY mechanism to avoid polling the database while waiting for more work to arrive. // // You can see the program in action by defining a function similar to // the following: // // CREATE OR REPLACE FUNCTION public.get_work() // RETURNS bigint // LANGUAGE sql // AS $$ // SELECT CASE WHEN random() >= 0.2 THEN int8 '1' END // $$ // ; package main import ( "database/sql" "fmt" "time" "github.com/lib/pq" ) func doWork(db *sql.DB, work int64) { // work here } func getWork(db *sql.DB) { for { // get work from the database here var work sql.NullInt64 err := db.QueryRow("SELECT get_work()").Scan(&work) if err != nil { fmt.Println("call to get_work() failed: ", err) time.Sleep(10 * time.Second) continue } if !work.Valid { // no more work to do fmt.Println("ran out of work") return } fmt.Println("starting work on ", work.Int64) go doWork(db, work.Int64) } } func waitForNotification(l *pq.Listener) { select { case <-l.Notify: fmt.Println("received notification, new work available") case <-time.After(90 * time.Second): go l.Ping() // Check if there's more work available, just in case it takes // a while for the Listener to notice connection loss and // reconnect. 
fmt.Println("received no work for 90 seconds, checking for new work") } } func main() { var conninfo string = "" db, err := sql.Open("postgres", conninfo) if err != nil { panic(err) } reportProblem := func(ev pq.ListenerEventType, err error) { if err != nil { fmt.Println(err.Error()) } } minReconn := 10 * time.Second maxReconn := time.Minute listener := pq.NewListener(conninfo, minReconn, maxReconn, reportProblem) err = listener.Listen("getwork") if err != nil { panic(err) } fmt.Println("entering main loop") for { // process all available work before waiting for notifications getWork(db) waitForNotification(listener) } } */ package listen dependencies/pkg/mod/github.com/lib/pq@v1.10.9/encode_test.go0000644000000000000000000006544215024302467022565 0ustar rootrootpackage pq import ( "bytes" "database/sql" "fmt" "regexp" "testing" "time" "github.com/lib/pq/oid" ) func TestScanTimestamp(t *testing.T) { var nt NullTime tn := time.Now() nt.Scan(tn) if !nt.Valid { t.Errorf("Expected Valid=false") } if nt.Time != tn { t.Errorf("Time value mismatch") } } func TestScanNilTimestamp(t *testing.T) { var nt NullTime nt.Scan(nil) if nt.Valid { t.Errorf("Expected Valid=false") } } var timeTests = []struct { str string timeval time.Time }{ {"22001-02-03", time.Date(22001, time.February, 3, 0, 0, 0, 0, time.FixedZone("", 0))}, {"2001-02-03", time.Date(2001, time.February, 3, 0, 0, 0, 0, time.FixedZone("", 0))}, {"0001-12-31 BC", time.Date(0, time.December, 31, 0, 0, 0, 0, time.FixedZone("", 0))}, {"2001-02-03 BC", time.Date(-2000, time.February, 3, 0, 0, 0, 0, time.FixedZone("", 0))}, {"2001-02-03 04:05:06", time.Date(2001, time.February, 3, 4, 5, 6, 0, time.FixedZone("", 0))}, {"2001-02-03 04:05:06.000001", time.Date(2001, time.February, 3, 4, 5, 6, 1000, time.FixedZone("", 0))}, {"2001-02-03 04:05:06.00001", time.Date(2001, time.February, 3, 4, 5, 6, 10000, time.FixedZone("", 0))}, {"2001-02-03 04:05:06.0001", time.Date(2001, time.February, 3, 4, 5, 6, 100000, time.FixedZone("", 0))}, {"2001-02-03 04:05:06.001", time.Date(2001, time.February, 3, 4, 5, 6, 1000000, time.FixedZone("", 0))}, {"2001-02-03 04:05:06.01", time.Date(2001, time.February, 3, 4, 5, 6, 10000000, time.FixedZone("", 0))}, {"2001-02-03 04:05:06.1", time.Date(2001, time.February, 3, 4, 5, 6, 100000000, time.FixedZone("", 0))}, {"2001-02-03 04:05:06.12", time.Date(2001, time.February, 3, 4, 5, 6, 120000000, time.FixedZone("", 0))}, {"2001-02-03 04:05:06.123", time.Date(2001, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))}, {"2001-02-03 04:05:06.1234", time.Date(2001, time.February, 3, 4, 5, 6, 123400000, time.FixedZone("", 0))}, {"2001-02-03 04:05:06.12345", time.Date(2001, time.February, 3, 4, 5, 6, 123450000, time.FixedZone("", 0))}, {"2001-02-03 04:05:06.123456", time.Date(2001, time.February, 3, 4, 5, 6, 123456000, time.FixedZone("", 0))}, {"2001-02-03 04:05:06.123-07", time.Date(2001, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", -7*60*60))}, {"2001-02-03 04:05:06-07", time.Date(2001, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -7*60*60))}, {"2001-02-03 04:05:06-07:42", time.Date(2001, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+42*60)))}, {"2001-02-03 04:05:06-07:30:09", time.Date(2001, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9)))}, {"2001-02-03 04:05:06+07:30:09", time.Date(2001, time.February, 3, 4, 5, 6, 0, time.FixedZone("", +(7*60*60+30*60+9)))}, {"2001-02-03 04:05:06+07", time.Date(2001, time.February, 3, 4, 5, 6, 0, time.FixedZone("", 7*60*60))}, 
{"0011-02-03 04:05:06 BC", time.Date(-10, time.February, 3, 4, 5, 6, 0, time.FixedZone("", 0))}, {"0011-02-03 04:05:06.123 BC", time.Date(-10, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))}, {"0011-02-03 04:05:06.123-07 BC", time.Date(-10, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", -7*60*60))}, {"0001-02-03 04:05:06.123", time.Date(1, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))}, {"0001-02-03 04:05:06.123 BC", time.Date(1, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0)).AddDate(-1, 0, 0)}, {"0001-02-03 04:05:06.123 BC", time.Date(0, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))}, {"0002-02-03 04:05:06.123 BC", time.Date(0, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0)).AddDate(-1, 0, 0)}, {"0002-02-03 04:05:06.123 BC", time.Date(-1, time.February, 3, 4, 5, 6, 123000000, time.FixedZone("", 0))}, {"12345-02-03 04:05:06.1", time.Date(12345, time.February, 3, 4, 5, 6, 100000000, time.FixedZone("", 0))}, {"123456-02-03 04:05:06.1", time.Date(123456, time.February, 3, 4, 5, 6, 100000000, time.FixedZone("", 0))}, } // Test that parsing the string results in the expected value. func TestParseTs(t *testing.T) { for i, tt := range timeTests { val, err := ParseTimestamp(nil, tt.str) if err != nil { t.Errorf("%d: got error: %v", i, err) } else if val.String() != tt.timeval.String() { t.Errorf("%d: expected to parse %q into %q; got %q", i, tt.str, tt.timeval, val) } } } var timeErrorTests = []string{ "BC", " BC", "2001", "2001-2-03", "2001-02-3", "2001-02-03 ", "2001-02-03 B", "2001-02-03 04", "2001-02-03 04:", "2001-02-03 04:05", "2001-02-03 04:05 B", "2001-02-03 04:05 BC", "2001-02-03 04:05:", "2001-02-03 04:05:6", "2001-02-03 04:05:06 B", "2001-02-03 04:05:06BC", "2001-02-03 04:05:06.123 B", } // Test that parsing the string results in an error. func TestParseTsErrors(t *testing.T) { for i, tt := range timeErrorTests { _, err := ParseTimestamp(nil, tt) if err == nil { t.Errorf("%d: expected an error from parsing: %v", i, tt) } } } // Now test that sending the value into the database and parsing it back // returns the same time.Time value. 
func TestEncodeAndParseTs(t *testing.T) { db, err := openTestConnConninfo("timezone='Etc/UTC'") if err != nil { t.Fatal(err) } defer db.Close() for i, tt := range timeTests { var dbstr string err = db.QueryRow("SELECT ($1::timestamptz)::text", tt.timeval).Scan(&dbstr) if err != nil { t.Errorf("%d: could not send value %q to the database: %s", i, tt.timeval, err) continue } val, err := ParseTimestamp(nil, dbstr) if err != nil { t.Errorf("%d: could not parse value %q: %s", i, dbstr, err) continue } val = val.In(tt.timeval.Location()) if val.String() != tt.timeval.String() { t.Errorf("%d: expected to parse %q into %q; got %q", i, dbstr, tt.timeval, val) } } } var formatTimeTests = []struct { time time.Time expected string }{ {time.Time{}, "0001-01-01 00:00:00Z"}, {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "2001-02-03 04:05:06.123456789Z"}, {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "2001-02-03 04:05:06.123456789+02:00"}, {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "2001-02-03 04:05:06.123456789-06:00"}, {time.Date(2001, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "2001-02-03 04:05:06-07:30:09"}, {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "0001-02-03 04:05:06.123456789Z"}, {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "0001-02-03 04:05:06.123456789+02:00"}, {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "0001-02-03 04:05:06.123456789-06:00"}, {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "0001-02-03 04:05:06.123456789Z BC"}, {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "0001-02-03 04:05:06.123456789+02:00 BC"}, {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "0001-02-03 04:05:06.123456789-06:00 BC"}, {time.Date(1, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "0001-02-03 04:05:06-07:30:09"}, {time.Date(0, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "0001-02-03 04:05:06-07:30:09 BC"}, } func TestFormatTs(t *testing.T) { for i, tt := range formatTimeTests { val := string(formatTs(tt.time)) if val != tt.expected { t.Errorf("%d: incorrect time format %q, want %q", i, val, tt.expected) } } } func TestFormatTsBackend(t *testing.T) { db := openTestConn(t) defer db.Close() var str string err := db.QueryRow("SELECT '2001-02-03T04:05:06.007-08:09:10'::time::text").Scan(&str) if err == nil { t.Fatalf("PostgreSQL is accepting an ISO timestamp input for time") } for i, tt := range formatTimeTests { for _, typ := range []string{"date", "time", "timetz", "timestamp", "timestamptz"} { err = db.QueryRow("SELECT $1::"+typ+"::text", tt.time).Scan(&str) if err != nil { t.Errorf("%d: incorrect time format for %v on the backend: %v", i, typ, err) } } } } func TestTimeWithoutTimezone(t *testing.T) { db := openTestConn(t) defer db.Close() tx, err := db.Begin() if err != nil { t.Fatal(err) } defer tx.Rollback() for _, tc := range []struct { refTime string expectedTime time.Time }{ {"11:59:59", time.Date(0, 1, 1, 11, 59, 59, 0, time.UTC)}, {"24:00", time.Date(0, 1, 2, 0, 0, 0, 0, time.UTC)}, {"24:00:00", time.Date(0, 1, 2, 0, 0, 0, 0, time.UTC)}, {"24:00:00.0", time.Date(0, 1, 2, 0, 0, 0, 0, time.UTC)}, {"24:00:00.000000", time.Date(0, 1, 2, 0, 0, 0, 0, time.UTC)}, } { t.Run( fmt.Sprintf("%s => %s", tc.refTime, 
tc.expectedTime.Format(time.RFC3339)), func(t *testing.T) { var gotTime time.Time row := tx.QueryRow("select $1::time", tc.refTime) err = row.Scan(&gotTime) if err != nil { t.Fatal(err) } if !tc.expectedTime.Equal(gotTime) { t.Errorf("timestamps not equal: %s != %s", tc.expectedTime, gotTime) } }, ) } } func TestTimeWithTimezone(t *testing.T) { db := openTestConn(t) defer db.Close() tx, err := db.Begin() if err != nil { t.Fatal(err) } defer tx.Rollback() for _, tc := range []struct { refTime string expectedTime time.Time }{ {"11:59:59+00:00", time.Date(0, 1, 1, 11, 59, 59, 0, time.UTC)}, {"11:59:59+04:00", time.Date(0, 1, 1, 11, 59, 59, 0, time.FixedZone("+04", 4*60*60))}, {"11:59:59+04:01:02", time.Date(0, 1, 1, 11, 59, 59, 0, time.FixedZone("+04:01:02", 4*60*60+1*60+2))}, {"11:59:59-04:01:02", time.Date(0, 1, 1, 11, 59, 59, 0, time.FixedZone("-04:01:02", -(4*60*60+1*60+2)))}, {"24:00+00", time.Date(0, 1, 2, 0, 0, 0, 0, time.UTC)}, {"24:00Z", time.Date(0, 1, 2, 0, 0, 0, 0, time.UTC)}, {"24:00-04:00", time.Date(0, 1, 2, 0, 0, 0, 0, time.FixedZone("-04", -4*60*60))}, {"24:00:00+00", time.Date(0, 1, 2, 0, 0, 0, 0, time.UTC)}, {"24:00:00.0+00", time.Date(0, 1, 2, 0, 0, 0, 0, time.UTC)}, {"24:00:00.000000+00", time.Date(0, 1, 2, 0, 0, 0, 0, time.UTC)}, } { t.Run( fmt.Sprintf("%s => %s", tc.refTime, tc.expectedTime.Format(time.RFC3339)), func(t *testing.T) { var gotTime time.Time row := tx.QueryRow("select $1::timetz", tc.refTime) err = row.Scan(&gotTime) if err != nil { t.Fatal(err) } if !tc.expectedTime.Equal(gotTime) { t.Errorf("timestamps not equal: %s != %s", tc.expectedTime, gotTime) } }, ) } } func TestTimestampWithTimeZone(t *testing.T) { db := openTestConn(t) defer db.Close() tx, err := db.Begin() if err != nil { t.Fatal(err) } defer tx.Rollback() // try several different locations, all included in Go's zoneinfo.zip for _, locName := range []string{ "UTC", "America/Chicago", "America/New_York", "Australia/Darwin", "Australia/Perth", } { loc, err := time.LoadLocation(locName) if err != nil { t.Logf("Could not load time zone %s - skipping", locName) continue } // Postgres timestamps have a resolution of 1 microsecond, so don't // use the full range of the Nanosecond argument refTime := time.Date(2012, 11, 6, 10, 23, 42, 123456000, loc) for _, pgTimeZone := range []string{"US/Eastern", "Australia/Darwin"} { // Switch Postgres's timezone to test different output timestamp formats _, err = tx.Exec(fmt.Sprintf("set time zone '%s'", pgTimeZone)) if err != nil { t.Fatal(err) } var gotTime time.Time row := tx.QueryRow("select $1::timestamp with time zone", refTime) err = row.Scan(&gotTime) if err != nil { t.Fatal(err) } if !refTime.Equal(gotTime) { t.Errorf("timestamps not equal: %s != %s", refTime, gotTime) } // check that the time zone is set correctly based on TimeZone pgLoc, err := time.LoadLocation(pgTimeZone) if err != nil { t.Logf("Could not load time zone %s - skipping", pgLoc) continue } translated := refTime.In(pgLoc) if translated.String() != gotTime.String() { t.Errorf("timestamps not equal: %s != %s", translated, gotTime) } } } } func TestTimestampWithOutTimezone(t *testing.T) { db := openTestConn(t) defer db.Close() test := func(ts, pgts string) { r, err := db.Query("SELECT $1::timestamp", pgts) if err != nil { t.Fatalf("Could not run query: %v", err) } if !r.Next() { t.Fatal("Expected at least one row") } var result time.Time err = r.Scan(&result) if err != nil { t.Fatalf("Did not expect error scanning row: %v", err) } expected, err := time.Parse(time.RFC3339, ts) if err != nil { 
t.Fatalf("Could not parse test time literal: %v", err) } if !result.Equal(expected) { t.Fatalf("Expected time to match %v: got mismatch %v", expected, result) } if r.Next() { t.Fatal("Expected only one row") } } test("2000-01-01T00:00:00Z", "2000-01-01T00:00:00") // Test higher precision time test("2013-01-04T20:14:58.80033Z", "2013-01-04 20:14:58.80033") } func TestInfinityTimestamp(t *testing.T) { db := openTestConn(t) defer db.Close() var err error var resultT time.Time expectedErrorStrRegexp := regexp.MustCompile( `^sql: Scan error on column index 0(, name "timestamp(tz)?"|): unsupported`) type testCases []struct { Query string Param string ExpectedErrorStrRegexp *regexp.Regexp ExpectedVal interface{} } tc := testCases{ {"SELECT $1::timestamp", "-infinity", expectedErrorStrRegexp, "-infinity"}, {"SELECT $1::timestamptz", "-infinity", expectedErrorStrRegexp, "-infinity"}, {"SELECT $1::timestamp", "infinity", expectedErrorStrRegexp, "infinity"}, {"SELECT $1::timestamptz", "infinity", expectedErrorStrRegexp, "infinity"}, } // try to assert []byte to time.Time for _, q := range tc { err = db.QueryRow(q.Query, q.Param).Scan(&resultT) if err == nil || !q.ExpectedErrorStrRegexp.MatchString(err.Error()) { t.Errorf("Scanning -/+infinity, expected error to match regexp %q, got %q", q.ExpectedErrorStrRegexp, err) } } // yield []byte for _, q := range tc { var resultI interface{} err = db.QueryRow(q.Query, q.Param).Scan(&resultI) if err != nil { t.Errorf("Scanning -/+infinity, expected no error, got %q", err) } result, ok := resultI.([]byte) if !ok { t.Errorf("Scanning -/+infinity, expected []byte, got %#v", resultI) } if string(result) != q.ExpectedVal { t.Errorf("Scanning -/+infinity, expected %q, got %q", q.ExpectedVal, result) } } y1500 := time.Date(1500, time.January, 1, 0, 0, 0, 0, time.UTC) y2500 := time.Date(2500, time.January, 1, 0, 0, 0, 0, time.UTC) EnableInfinityTs(y1500, y2500) err = db.QueryRow("SELECT $1::timestamp", "infinity").Scan(&resultT) if err != nil { t.Errorf("Scanning infinity, expected no error, got %q", err) } if !resultT.Equal(y2500) { t.Errorf("Scanning infinity, expected %q, got %q", y2500, resultT) } err = db.QueryRow("SELECT $1::timestamptz", "infinity").Scan(&resultT) if err != nil { t.Errorf("Scanning infinity, expected no error, got %q", err) } if !resultT.Equal(y2500) { t.Errorf("Scanning Infinity, expected time %q, got %q", y2500, resultT.String()) } err = db.QueryRow("SELECT $1::timestamp", "-infinity").Scan(&resultT) if err != nil { t.Errorf("Scanning -infinity, expected no error, got %q", err) } if !resultT.Equal(y1500) { t.Errorf("Scanning -infinity, expected time %q, got %q", y1500, resultT.String()) } err = db.QueryRow("SELECT $1::timestamptz", "-infinity").Scan(&resultT) if err != nil { t.Errorf("Scanning -infinity, expected no error, got %q", err) } if !resultT.Equal(y1500) { t.Errorf("Scanning -infinity, expected time %q, got %q", y1500, resultT.String()) } ym1500 := time.Date(-1500, time.January, 1, 0, 0, 0, 0, time.UTC) y11500 := time.Date(11500, time.January, 1, 0, 0, 0, 0, time.UTC) var s string err = db.QueryRow("SELECT $1::timestamp::text", ym1500).Scan(&s) if err != nil { t.Errorf("Encoding -infinity, expected no error, got %q", err) } if s != "-infinity" { t.Errorf("Encoding -infinity, expected %q, got %q", "-infinity", s) } err = db.QueryRow("SELECT $1::timestamptz::text", ym1500).Scan(&s) if err != nil { t.Errorf("Encoding -infinity, expected no error, got %q", err) } if s != "-infinity" { t.Errorf("Encoding -infinity, expected %q, got %q", 
"-infinity", s) } err = db.QueryRow("SELECT $1::timestamp::text", y11500).Scan(&s) if err != nil { t.Errorf("Encoding infinity, expected no error, got %q", err) } if s != "infinity" { t.Errorf("Encoding infinity, expected %q, got %q", "infinity", s) } err = db.QueryRow("SELECT $1::timestamptz::text", y11500).Scan(&s) if err != nil { t.Errorf("Encoding infinity, expected no error, got %q", err) } if s != "infinity" { t.Errorf("Encoding infinity, expected %q, got %q", "infinity", s) } disableInfinityTs() var panicErrorString string func() { defer func() { panicErrorString, _ = recover().(string) }() EnableInfinityTs(y2500, y1500) }() if panicErrorString != infinityTsNegativeMustBeSmaller { t.Errorf("Expected error, %q, got %q", infinityTsNegativeMustBeSmaller, panicErrorString) } } func TestStringWithNul(t *testing.T) { db := openTestConn(t) defer db.Close() hello0world := string("hello\x00world") _, err := db.Query("SELECT $1::text", &hello0world) if err == nil { t.Fatal("Postgres accepts a string with nul in it; " + "injection attacks may be plausible") } } func TestByteSliceToText(t *testing.T) { db := openTestConn(t) defer db.Close() b := []byte("hello world") row := db.QueryRow("SELECT $1::text", b) var result []byte err := row.Scan(&result) if err != nil { t.Fatal(err) } if string(result) != string(b) { t.Fatalf("expected %v but got %v", b, result) } } func TestStringToBytea(t *testing.T) { db := openTestConn(t) defer db.Close() b := "hello world" row := db.QueryRow("SELECT $1::bytea", b) var result []byte err := row.Scan(&result) if err != nil { t.Fatal(err) } if !bytes.Equal(result, []byte(b)) { t.Fatalf("expected %v but got %v", b, result) } } func TestTextByteSliceToUUID(t *testing.T) { db := openTestConn(t) defer db.Close() b := []byte("a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11") row := db.QueryRow("SELECT $1::uuid", b) var result string err := row.Scan(&result) if forceBinaryParameters() { pqErr := err.(*Error) if pqErr == nil { t.Errorf("Expected to get error") } else if pqErr.Code != "22P03" { t.Fatalf("Expected to get invalid binary encoding error (22P03), got %s", pqErr.Code) } } else { if err != nil { t.Fatal(err) } if result != string(b) { t.Fatalf("expected %v but got %v", b, result) } } } func TestBinaryByteSlicetoUUID(t *testing.T) { db := openTestConn(t) defer db.Close() b := []byte{'\xa0', '\xee', '\xbc', '\x99', '\x9c', '\x0b', '\x4e', '\xf8', '\xbb', '\x00', '\x6b', '\xb9', '\xbd', '\x38', '\x0a', '\x11'} row := db.QueryRow("SELECT $1::uuid", b) var result string err := row.Scan(&result) if forceBinaryParameters() { if err != nil { t.Fatal(err) } if result != string("a0eebc99-9c0b-4ef8-bb00-6bb9bd380a11") { t.Fatalf("expected %v but got %v", b, result) } } else { pqErr := err.(*Error) if pqErr == nil { t.Errorf("Expected to get error") } else if pqErr.Code != "22021" { t.Fatalf("Expected to get invalid byte sequence for encoding error (22021), got %s", pqErr.Code) } } } func TestStringToUUID(t *testing.T) { db := openTestConn(t) defer db.Close() s := "a0eebc99-9c0b-4ef8-bb00-6bb9bd380a11" row := db.QueryRow("SELECT $1::uuid", s) var result string err := row.Scan(&result) if err != nil { t.Fatal(err) } if result != s { t.Fatalf("expected %v but got %v", s, result) } } func TestTextByteSliceToInt(t *testing.T) { db := openTestConn(t) defer db.Close() expected := 12345678 b := []byte(fmt.Sprintf("%d", expected)) row := db.QueryRow("SELECT $1::int", b) var result int err := row.Scan(&result) if forceBinaryParameters() { pqErr := err.(*Error) if pqErr == nil { 
t.Errorf("Expected to get error") } else if pqErr.Code != "22P03" { t.Fatalf("Expected to get invalid binary encoding error (22P03), got %s", pqErr.Code) } } else { if err != nil { t.Fatal(err) } if result != expected { t.Fatalf("expected %v but got %v", expected, result) } } } func TestBinaryByteSliceToInt(t *testing.T) { db := openTestConn(t) defer db.Close() expected := 12345678 b := []byte{'\x00', '\xbc', '\x61', '\x4e'} row := db.QueryRow("SELECT $1::int", b) var result int err := row.Scan(&result) if forceBinaryParameters() { if err != nil { t.Fatal(err) } if result != expected { t.Fatalf("expected %v but got %v", expected, result) } } else { pqErr := err.(*Error) if pqErr == nil { t.Errorf("Expected to get error") } else if pqErr.Code != "22021" { t.Fatalf("Expected to get invalid byte sequence for encoding error (22021), got %s", pqErr.Code) } } } func TestTextDecodeIntoString(t *testing.T) { input := []byte("hello world") want := string(input) for _, typ := range []oid.Oid{oid.T_char, oid.T_varchar, oid.T_text} { got := decode(¶meterStatus{}, input, typ, formatText) if got != want { t.Errorf("invalid string decoding output for %T(%+v), got %v but expected %v", typ, typ, got, want) } } } func TestByteaOutputFormatEncoding(t *testing.T) { input := []byte("\\x\x00\x01\x02\xFF\xFEabcdefg0123") want := []byte("\\x5c78000102fffe6162636465666730313233") got := encode(¶meterStatus{serverVersion: 90000}, input, oid.T_bytea) if !bytes.Equal(want, got) { t.Errorf("invalid hex bytea output, got %v but expected %v", got, want) } want = []byte("\\\\x\\000\\001\\002\\377\\376abcdefg0123") got = encode(¶meterStatus{serverVersion: 84000}, input, oid.T_bytea) if !bytes.Equal(want, got) { t.Errorf("invalid escape bytea output, got %v but expected %v", got, want) } } func TestByteaOutputFormats(t *testing.T) { db := openTestConn(t) defer db.Close() if getServerVersion(t, db) < 90000 { // skip return } testByteaOutputFormat := func(f string, usePrepared bool) { expectedData := []byte("\x5c\x78\x00\xff\x61\x62\x63\x01\x08") sqlQuery := "SELECT decode('5c7800ff6162630108', 'hex')" var data []byte // use a txn to avoid relying on getting the same connection txn, err := db.Begin() if err != nil { t.Fatal(err) } defer txn.Rollback() _, err = txn.Exec("SET LOCAL bytea_output TO " + f) if err != nil { t.Fatal(err) } var rows *sql.Rows var stmt *sql.Stmt if usePrepared { stmt, err = txn.Prepare(sqlQuery) if err != nil { t.Fatal(err) } rows, err = stmt.Query() } else { // use Query; QueryRow would hide the actual error rows, err = txn.Query(sqlQuery) } if err != nil { t.Fatal(err) } if !rows.Next() { if rows.Err() != nil { t.Fatal(rows.Err()) } t.Fatal("shouldn't happen") } err = rows.Scan(&data) if err != nil { t.Fatal(err) } err = rows.Close() if err != nil { t.Fatal(err) } if stmt != nil { err = stmt.Close() if err != nil { t.Fatal(err) } } if !bytes.Equal(data, expectedData) { t.Errorf("unexpected bytea value %v for format %s; expected %v", data, f, expectedData) } } testByteaOutputFormat("hex", false) testByteaOutputFormat("escape", false) testByteaOutputFormat("hex", true) testByteaOutputFormat("escape", true) } func TestAppendEncodedText(t *testing.T) { var buf []byte buf = appendEncodedText(¶meterStatus{serverVersion: 90000}, buf, int64(10)) buf = append(buf, '\t') buf = appendEncodedText(¶meterStatus{serverVersion: 90000}, buf, 42.0000000001) buf = append(buf, '\t') buf = appendEncodedText(¶meterStatus{serverVersion: 90000}, buf, "hello\tworld") buf = append(buf, '\t') buf = 
appendEncodedText(¶meterStatus{serverVersion: 90000}, buf, []byte{0, 128, 255}) if string(buf) != "10\t42.0000000001\thello\\tworld\t\\\\x0080ff" { t.Fatal(string(buf)) } } func TestAppendEscapedText(t *testing.T) { if esc := appendEscapedText(nil, "hallo\tescape"); string(esc) != "hallo\\tescape" { t.Fatal(string(esc)) } if esc := appendEscapedText(nil, "hallo\\tescape\n"); string(esc) != "hallo\\\\tescape\\n" { t.Fatal(string(esc)) } if esc := appendEscapedText(nil, "\n\r\t\f"); string(esc) != "\\n\\r\\t\f" { t.Fatal(string(esc)) } } func TestAppendEscapedTextExistingBuffer(t *testing.T) { buf := []byte("123\t") if esc := appendEscapedText(buf, "hallo\tescape"); string(esc) != "123\thallo\\tescape" { t.Fatal(string(esc)) } buf = []byte("123\t") if esc := appendEscapedText(buf, "hallo\\tescape\n"); string(esc) != "123\thallo\\\\tescape\\n" { t.Fatal(string(esc)) } buf = []byte("123\t") if esc := appendEscapedText(buf, "\n\r\t\f"); string(esc) != "123\t\\n\\r\\t\f" { t.Fatal(string(esc)) } } var formatAndParseTimestamp = []struct { time time.Time expected string }{ {time.Time{}, "0001-01-01 00:00:00Z"}, {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "2001-02-03 04:05:06.123456789Z"}, {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "2001-02-03 04:05:06.123456789+02:00"}, {time.Date(2001, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "2001-02-03 04:05:06.123456789-06:00"}, {time.Date(2001, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "2001-02-03 04:05:06-07:30:09"}, {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "0001-02-03 04:05:06.123456789Z"}, {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "0001-02-03 04:05:06.123456789+02:00"}, {time.Date(1, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "0001-02-03 04:05:06.123456789-06:00"}, {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 0)), "0001-02-03 04:05:06.123456789Z BC"}, {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", 2*60*60)), "0001-02-03 04:05:06.123456789+02:00 BC"}, {time.Date(0, time.February, 3, 4, 5, 6, 123456789, time.FixedZone("", -6*60*60)), "0001-02-03 04:05:06.123456789-06:00 BC"}, {time.Date(1, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "0001-02-03 04:05:06-07:30:09"}, {time.Date(0, time.February, 3, 4, 5, 6, 0, time.FixedZone("", -(7*60*60+30*60+9))), "0001-02-03 04:05:06-07:30:09 BC"}, } func TestFormatAndParseTimestamp(t *testing.T) { for _, val := range formatAndParseTimestamp { formattedTime := FormatTimestamp(val.time) parsedTime, err := ParseTimestamp(nil, string(formattedTime)) if err != nil { t.Errorf("invalid parsing, err: %v", err.Error()) } if val.time.UTC() != parsedTime.UTC() { t.Errorf("invalid parsing from formatted timestamp, got %v; expected %v", parsedTime.String(), val.time.String()) } } } func BenchmarkAppendEscapedText(b *testing.B) { longString := "" for i := 0; i < 100; i++ { longString += "123456789\n" } for i := 0; i < b.N; i++ { appendEscapedText(nil, longString) } } func BenchmarkAppendEscapedTextNoEscape(b *testing.B) { longString := "" for i := 0; i < 100; i++ { longString += "1234567890" } for i := 0; i < b.N; i++ { appendEscapedText(nil, longString) } } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/notice_test.go0000644000000000000000000000266015024302467022602 0ustar rootroot//go:build go1.10 // +build go1.10 package pq 
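// Caller-side shape of the notice-handler connector exercised below; the DSN
// and log call are purely illustrative:
//
//	base, err := pq.NewConnector("dbname=app sslmode=disable")
//	if err != nil {
//		log.Fatal(err)
//	}
//	db := sql.OpenDB(pq.ConnectorWithNoticeHandler(base, func(n *pq.Error) {
//		log.Printf("NOTICE %s: %s", n.Severity, n.Message)
//	}))
//	_ = db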
import ( "database/sql" "database/sql/driver" "testing" ) func TestConnectorWithNoticeHandler_Simple(t *testing.T) { b, err := NewConnector("") if err != nil { t.Fatal(err) } var notice *Error // Make connector w/ handler to set the local var c := ConnectorWithNoticeHandler(b, func(n *Error) { notice = n }) raiseNotice(c, t, "Test notice #1") if notice == nil || notice.Message != "Test notice #1" { t.Fatalf("Expected notice w/ message, got %v", notice) } // Unset the handler on the same connector prevC := c if c = ConnectorWithNoticeHandler(c, nil); c != prevC { t.Fatalf("Expected to not create new connector but did") } raiseNotice(c, t, "Test notice #2") if notice == nil || notice.Message != "Test notice #1" { t.Fatalf("Expected notice to not change, got %v", notice) } // Set it back on the same connector if c = ConnectorWithNoticeHandler(c, func(n *Error) { notice = n }); c != prevC { t.Fatal("Expected to not create new connector but did") } raiseNotice(c, t, "Test notice #3") if notice == nil || notice.Message != "Test notice #3" { t.Fatalf("Expected notice w/ message, got %v", notice) } } func raiseNotice(c driver.Connector, t *testing.T, escapedNotice string) { db := sql.OpenDB(c) defer db.Close() sql := "DO language plpgsql $$ BEGIN RAISE NOTICE '" + escapedNotice + "'; END $$" if _, err := db.Exec(sql); err != nil { t.Fatal(err) } } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/ssl_windows.go0000644000000000000000000000043615024302467022634 0ustar rootroot//go:build windows // +build windows package pq // sslKeyPermissions checks the permissions on user-supplied ssl key files. // The key file should have very little access. // // libpq does not check key file permissions on Windows. func sslKeyPermissions(string) error { return nil } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/go18_test.go0000644000000000000000000002075015024302467022077 0ustar rootrootpackage pq import ( "context" "database/sql" "database/sql/driver" "errors" "runtime" "strings" "testing" "time" ) func TestMultipleSimpleQuery(t *testing.T) { db := openTestConn(t) defer db.Close() rows, err := db.Query("select 1; set time zone default; select 2; select 3") if err != nil { t.Fatal(err) } defer rows.Close() var i int for rows.Next() { if err := rows.Scan(&i); err != nil { t.Fatal(err) } if i != 1 { t.Fatalf("expected 1, got %d", i) } } if !rows.NextResultSet() { t.Fatal("expected more result sets", rows.Err()) } for rows.Next() { if err := rows.Scan(&i); err != nil { t.Fatal(err) } if i != 2 { t.Fatalf("expected 2, got %d", i) } } // Make sure that if we ignore a result we can still query. rows, err = db.Query("select 4; select 5") if err != nil { t.Fatal(err) } defer rows.Close() for rows.Next() { if err := rows.Scan(&i); err != nil { t.Fatal(err) } if i != 4 { t.Fatalf("expected 4, got %d", i) } } if !rows.NextResultSet() { t.Fatal("expected more result sets", rows.Err()) } for rows.Next() { if err := rows.Scan(&i); err != nil { t.Fatal(err) } if i != 5 { t.Fatalf("expected 5, got %d", i) } } if rows.NextResultSet() { t.Fatal("unexpected result set") } } const contextRaceIterations = 100 const cancelErrorCode ErrorCode = "57014" func TestContextCancelExec(t *testing.T) { db := openTestConn(t) defer db.Close() ctx, cancel := context.WithCancel(context.Background()) // Delay execution for just a bit until db.ExecContext has begun. defer time.AfterFunc(time.Millisecond*10, cancel).Stop() // Not canceled until after the exec has started. 
if _, err := db.ExecContext(ctx, "select pg_sleep(1)"); err == nil { t.Fatal("expected error") } else if pgErr := (*Error)(nil); !(errors.As(err, &pgErr) && pgErr.Code == cancelErrorCode) { t.Fatalf("unexpected error: %s", err) } // Context is already canceled, so error should come before execution. if _, err := db.ExecContext(ctx, "select pg_sleep(1)"); err == nil { t.Fatal("expected error") } else if err.Error() != "context canceled" { t.Fatalf("unexpected error: %s", err) } for i := 0; i < contextRaceIterations; i++ { func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() if _, err := db.ExecContext(ctx, "select 1"); err != nil { t.Fatal(err) } }() if _, err := db.Exec("select 1"); err != nil { t.Fatal(err) } } } func TestContextCancelQuery(t *testing.T) { db := openTestConn(t) defer db.Close() ctx, cancel := context.WithCancel(context.Background()) // Delay execution for just a bit until db.QueryContext has begun. defer time.AfterFunc(time.Millisecond*10, cancel).Stop() // Not canceled until after the exec has started. if _, err := db.QueryContext(ctx, "select pg_sleep(1)"); err == nil { t.Fatal("expected error") } else if pgErr := (*Error)(nil); !(errors.As(err, &pgErr) && pgErr.Code == cancelErrorCode) { t.Fatalf("unexpected error: %s", err) } // Context is already canceled, so error should come before execution. if _, err := db.QueryContext(ctx, "select pg_sleep(1)"); err == nil { t.Fatal("expected error") } else if err.Error() != "context canceled" { t.Fatalf("unexpected error: %s", err) } for i := 0; i < contextRaceIterations; i++ { func() { ctx, cancel := context.WithCancel(context.Background()) rows, err := db.QueryContext(ctx, "select 1") cancel() if err != nil { t.Fatal(err) } else if err := rows.Close(); err != nil && err != driver.ErrBadConn && err != context.Canceled { t.Fatal(err) } }() if rows, err := db.Query("select 1"); err != nil { t.Fatal(err) } else if err := rows.Close(); err != nil { t.Fatal(err) } } } // TestIssue617 tests that a failed query in QueryContext doesn't lead to a // goroutine leak. func TestIssue617(t *testing.T) { db := openTestConn(t) defer db.Close() const N = 10 numGoroutineStart := runtime.NumGoroutine() for i := 0; i < N; i++ { func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() _, err := db.QueryContext(ctx, `SELECT * FROM DOESNOTEXIST`) pqErr, _ := err.(*Error) // Expecting "pq: relation \"doesnotexist\" does not exist" error. if err == nil || pqErr == nil || pqErr.Code != "42P01" { t.Fatalf("expected undefined table error, got %v", err) } }() } // Give time for goroutines to terminate delayTime := time.Millisecond * 50 waitTime := time.Second iterations := int(waitTime / delayTime) var numGoroutineFinish int for i := 0; i < iterations; i++ { time.Sleep(delayTime) numGoroutineFinish = runtime.NumGoroutine() // We use N/2 and not N because the GC and other actors may increase or // decrease the number of goroutines. if numGoroutineFinish-numGoroutineStart < N/2 { return } } t.Errorf("goroutine leak detected, was %d, now %d", numGoroutineStart, numGoroutineFinish) } func TestContextCancelBegin(t *testing.T) { db := openTestConn(t) defer db.Close() ctx, cancel := context.WithCancel(context.Background()) tx, err := db.BeginTx(ctx, nil) if err != nil { t.Fatal(err) } // Delay execution for just a bit until tx.Exec has begun. defer time.AfterFunc(time.Millisecond*10, cancel).Stop() // Not canceled until after the exec has started. 
if _, err := tx.Exec("select pg_sleep(1)"); err == nil { t.Fatal("expected error") } else if pgErr := (*Error)(nil); !(errors.As(err, &pgErr) && pgErr.Code == cancelErrorCode) { t.Fatalf("unexpected error: %s", err) } // Transaction is canceled, so expect an error. if _, err := tx.Query("select pg_sleep(1)"); err == nil { t.Fatal("expected error") } else if err != sql.ErrTxDone { t.Fatalf("unexpected error: %s", err) } // Context is canceled, so cannot begin a transaction. if _, err := db.BeginTx(ctx, nil); err == nil { t.Fatal("expected error") } else if err.Error() != "context canceled" { t.Fatalf("unexpected error: %s", err) } for i := 0; i < contextRaceIterations; i++ { func() { ctx, cancel := context.WithCancel(context.Background()) tx, err := db.BeginTx(ctx, nil) cancel() if err != nil { t.Fatal(err) } else if err, pgErr := tx.Rollback(), (*Error)(nil); err != nil && !(errors.As(err, &pgErr) && pgErr.Code == cancelErrorCode) && err != sql.ErrTxDone && err != driver.ErrBadConn && err != context.Canceled { t.Fatal(err) } }() if tx, err := db.Begin(); err != nil { t.Fatal(err) } else if err := tx.Rollback(); err != nil { t.Fatal(err) } } } func TestTxOptions(t *testing.T) { db := openTestConn(t) defer db.Close() ctx := context.Background() tests := []struct { level sql.IsolationLevel isolation string }{ { level: sql.LevelDefault, isolation: "", }, { level: sql.LevelReadUncommitted, isolation: "read uncommitted", }, { level: sql.LevelReadCommitted, isolation: "read committed", }, { level: sql.LevelRepeatableRead, isolation: "repeatable read", }, { level: sql.LevelSerializable, isolation: "serializable", }, } for _, test := range tests { for _, ro := range []bool{true, false} { tx, err := db.BeginTx(ctx, &sql.TxOptions{ Isolation: test.level, ReadOnly: ro, }) if err != nil { t.Fatal(err) } var isolation string err = tx.QueryRow("select current_setting('transaction_isolation')").Scan(&isolation) if err != nil { t.Fatal(err) } if test.isolation != "" && isolation != test.isolation { t.Errorf("wrong isolation level: %s != %s", isolation, test.isolation) } var isRO string err = tx.QueryRow("select current_setting('transaction_read_only')").Scan(&isRO) if err != nil { t.Fatal(err) } if ro != (isRO == "on") { t.Errorf("read/[write,only] not set: %t != %s for level %s", ro, isRO, test.isolation) } tx.Rollback() } } _, err := db.BeginTx(ctx, &sql.TxOptions{ Isolation: sql.LevelLinearizable, }) if err == nil { t.Fatal("expected LevelLinearizable to fail") } if !strings.Contains(err.Error(), "isolation level not supported") { t.Errorf("Expected error to mention isolation level, got %q", err) } } func TestErrorSQLState(t *testing.T) { r := readBuf([]byte{67, 52, 48, 48, 48, 49, 0, 0}) // 40001 err := parseError(&r) var sqlErr errWithSQLState if !errors.As(err, &sqlErr) { t.Fatal("SQLState interface not satisfied") } if state := err.SQLState(); state != "40001" { t.Fatalf("unexpected SQL state %v", state) } } type errWithSQLState interface { SQLState() string } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/scram/0000755000000000000000000000000015024302467021034 5ustar rootrootdependencies/pkg/mod/github.com/lib/pq@v1.10.9/scram/scram.go0000644000000000000000000001702515024302467022475 0ustar rootroot// Copyright (c) 2014 - Gustavo Niemeyer // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. 
Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Package scram implements a SCRAM-{SHA-1,etc} client per RFC5802. // // http://tools.ietf.org/html/rfc5802 // package scram import ( "bytes" "crypto/hmac" "crypto/rand" "encoding/base64" "fmt" "hash" "strconv" "strings" ) // Client implements a SCRAM-* client (SCRAM-SHA-1, SCRAM-SHA-256, etc). // // A Client may be used within a SASL conversation with logic resembling: // // var in []byte // var client = scram.NewClient(sha1.New, user, pass) // for client.Step(in) { // out := client.Out() // // send out to server // in := serverOut // } // if client.Err() != nil { // // auth failed // } // type Client struct { newHash func() hash.Hash user string pass string step int out bytes.Buffer err error clientNonce []byte serverNonce []byte saltedPass []byte authMsg bytes.Buffer } // NewClient returns a new SCRAM-* client with the provided hash algorithm. // // For SCRAM-SHA-256, for example, use: // // client := scram.NewClient(sha256.New, user, pass) // func NewClient(newHash func() hash.Hash, user, pass string) *Client { c := &Client{ newHash: newHash, user: user, pass: pass, } c.out.Grow(256) c.authMsg.Grow(256) return c } // Out returns the data to be sent to the server in the current step. func (c *Client) Out() []byte { if c.out.Len() == 0 { return nil } return c.out.Bytes() } // Err returns the error that occurred, or nil if there were no errors. func (c *Client) Err() error { return c.err } // SetNonce sets the client nonce to the provided value. // If not set, the nonce is generated automatically out of crypto/rand on the first step. func (c *Client) SetNonce(nonce []byte) { c.clientNonce = nonce } var escaper = strings.NewReplacer("=", "=3D", ",", "=2C") // Step processes the incoming data from the server and makes the // next round of data for the server available via Client.Out. // Step returns false if there are no errors and more data is // still expected. 
func (c *Client) Step(in []byte) bool { c.out.Reset() if c.step > 2 || c.err != nil { return false } c.step++ switch c.step { case 1: c.err = c.step1(in) case 2: c.err = c.step2(in) case 3: c.err = c.step3(in) } return c.step > 2 || c.err != nil } func (c *Client) step1(in []byte) error { if len(c.clientNonce) == 0 { const nonceLen = 16 buf := make([]byte, nonceLen+b64.EncodedLen(nonceLen)) if _, err := rand.Read(buf[:nonceLen]); err != nil { return fmt.Errorf("cannot read random SCRAM-SHA-256 nonce from operating system: %v", err) } c.clientNonce = buf[nonceLen:] b64.Encode(c.clientNonce, buf[:nonceLen]) } c.authMsg.WriteString("n=") escaper.WriteString(&c.authMsg, c.user) c.authMsg.WriteString(",r=") c.authMsg.Write(c.clientNonce) c.out.WriteString("n,,") c.out.Write(c.authMsg.Bytes()) return nil } var b64 = base64.StdEncoding func (c *Client) step2(in []byte) error { c.authMsg.WriteByte(',') c.authMsg.Write(in) fields := bytes.Split(in, []byte(",")) if len(fields) != 3 { return fmt.Errorf("expected 3 fields in first SCRAM-SHA-256 server message, got %d: %q", len(fields), in) } if !bytes.HasPrefix(fields[0], []byte("r=")) || len(fields[0]) < 2 { return fmt.Errorf("server sent an invalid SCRAM-SHA-256 nonce: %q", fields[0]) } if !bytes.HasPrefix(fields[1], []byte("s=")) || len(fields[1]) < 6 { return fmt.Errorf("server sent an invalid SCRAM-SHA-256 salt: %q", fields[1]) } if !bytes.HasPrefix(fields[2], []byte("i=")) || len(fields[2]) < 6 { return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2]) } c.serverNonce = fields[0][2:] if !bytes.HasPrefix(c.serverNonce, c.clientNonce) { return fmt.Errorf("server SCRAM-SHA-256 nonce is not prefixed by client nonce: got %q, want %q+\"...\"", c.serverNonce, c.clientNonce) } salt := make([]byte, b64.DecodedLen(len(fields[1][2:]))) n, err := b64.Decode(salt, fields[1][2:]) if err != nil { return fmt.Errorf("cannot decode SCRAM-SHA-256 salt sent by server: %q", fields[1]) } salt = salt[:n] iterCount, err := strconv.Atoi(string(fields[2][2:])) if err != nil { return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2]) } c.saltPassword(salt, iterCount) c.authMsg.WriteString(",c=biws,r=") c.authMsg.Write(c.serverNonce) c.out.WriteString("c=biws,r=") c.out.Write(c.serverNonce) c.out.WriteString(",p=") c.out.Write(c.clientProof()) return nil } func (c *Client) step3(in []byte) error { var isv, ise bool var fields = bytes.Split(in, []byte(",")) if len(fields) == 1 { isv = bytes.HasPrefix(fields[0], []byte("v=")) ise = bytes.HasPrefix(fields[0], []byte("e=")) } if ise { return fmt.Errorf("SCRAM-SHA-256 authentication error: %s", fields[0][2:]) } else if !isv { return fmt.Errorf("unsupported SCRAM-SHA-256 final message from server: %q", in) } if !bytes.Equal(c.serverSignature(), fields[0][2:]) { return fmt.Errorf("cannot authenticate SCRAM-SHA-256 server signature: %q", fields[0][2:]) } return nil } func (c *Client) saltPassword(salt []byte, iterCount int) { mac := hmac.New(c.newHash, []byte(c.pass)) mac.Write(salt) mac.Write([]byte{0, 0, 0, 1}) ui := mac.Sum(nil) hi := make([]byte, len(ui)) copy(hi, ui) for i := 1; i < iterCount; i++ { mac.Reset() mac.Write(ui) mac.Sum(ui[:0]) for j, b := range ui { hi[j] ^= b } } c.saltedPass = hi } func (c *Client) clientProof() []byte { mac := hmac.New(c.newHash, c.saltedPass) mac.Write([]byte("Client Key")) clientKey := mac.Sum(nil) hash := c.newHash() hash.Write(clientKey) storedKey := hash.Sum(nil) mac = hmac.New(c.newHash, storedKey) 
mac.Write(c.authMsg.Bytes()) clientProof := mac.Sum(nil) for i, b := range clientKey { clientProof[i] ^= b } clientProof64 := make([]byte, b64.EncodedLen(len(clientProof))) b64.Encode(clientProof64, clientProof) return clientProof64 } func (c *Client) serverSignature() []byte { mac := hmac.New(c.newHash, c.saltedPass) mac.Write([]byte("Server Key")) serverKey := mac.Sum(nil) mac = hmac.New(c.newHash, serverKey) mac.Write(c.authMsg.Bytes()) serverSignature := mac.Sum(nil) encoded := make([]byte, b64.EncodedLen(len(serverSignature))) b64.Encode(encoded, serverSignature) return encoded } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/.gitignore0000644000000000000000000000004115024302467021712 0ustar rootroot.db *.test *~ *.swp .idea .vscodedependencies/pkg/mod/github.com/lib/pq@v1.10.9/connector_test.go0000644000000000000000000000252415024302467023312 0ustar rootroot//go:build go1.10 // +build go1.10 package pq import ( "context" "database/sql" "database/sql/driver" "testing" ) func TestNewConnector_WorksWithOpenDB(t *testing.T) { name := "" c, err := NewConnector(name) if err != nil { t.Fatal(err) } db := sql.OpenDB(c) defer db.Close() // database/sql might not call our Open at all unless we do something with // the connection txn, err := db.Begin() if err != nil { t.Fatal(err) } txn.Rollback() } func TestNewConnector_Connect(t *testing.T) { name := "" c, err := NewConnector(name) if err != nil { t.Fatal(err) } db, err := c.Connect(context.Background()) if err != nil { t.Fatal(err) } defer db.Close() // database/sql might not call our Open at all unless we do something with // the connection txn, err := db.(driver.ConnBeginTx).BeginTx(context.Background(), driver.TxOptions{}) if err != nil { t.Fatal(err) } txn.Rollback() } func TestNewConnector_Driver(t *testing.T) { name := "" c, err := NewConnector(name) if err != nil { t.Fatal(err) } db, err := c.Driver().Open(name) if err != nil { t.Fatal(err) } defer db.Close() // database/sql might not call our Open at all unless we do something with // the connection txn, err := db.(driver.ConnBeginTx).BeginTx(context.Background(), driver.TxOptions{}) if err != nil { t.Fatal(err) } txn.Rollback() } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/error.go0000644000000000000000000003674315024302467021424 0ustar rootrootpackage pq import ( "database/sql/driver" "fmt" "io" "net" "runtime" ) // Error severities const ( Efatal = "FATAL" Epanic = "PANIC" Ewarning = "WARNING" Enotice = "NOTICE" Edebug = "DEBUG" Einfo = "INFO" Elog = "LOG" ) // Error represents an error communicating with the server. // // See http://www.postgresql.org/docs/current/static/protocol-error-fields.html for details of the fields type Error struct { Severity string Code ErrorCode Message string Detail string Hint string Position string InternalPosition string InternalQuery string Where string Schema string Table string Column string DataTypeName string Constraint string File string Line string Routine string } // ErrorCode is a five-character error code. type ErrorCode string // Name returns a more human friendly rendering of the error code, namely the // "condition name". // // See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for // details. func (ec ErrorCode) Name() string { return errorCodeNames[ec] } // ErrorClass is only the class part of an error code. type ErrorClass string // Name returns the condition name of an error class. It is equivalent to the // condition name of the "standard" error code (i.e. the one having the last // three characters "000"). 
func (ec ErrorClass) Name() string { return errorCodeNames[ErrorCode(ec+"000")] } // Class returns the error class, e.g. "28". // // See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for // details. func (ec ErrorCode) Class() ErrorClass { return ErrorClass(ec[0:2]) } // errorCodeNames is a mapping between the five-character error codes and the // human readable "condition names". It is derived from the list at // http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html var errorCodeNames = map[ErrorCode]string{ // Class 00 - Successful Completion "00000": "successful_completion", // Class 01 - Warning "01000": "warning", "0100C": "dynamic_result_sets_returned", "01008": "implicit_zero_bit_padding", "01003": "null_value_eliminated_in_set_function", "01007": "privilege_not_granted", "01006": "privilege_not_revoked", "01004": "string_data_right_truncation", "01P01": "deprecated_feature", // Class 02 - No Data (this is also a warning class per the SQL standard) "02000": "no_data", "02001": "no_additional_dynamic_result_sets_returned", // Class 03 - SQL Statement Not Yet Complete "03000": "sql_statement_not_yet_complete", // Class 08 - Connection Exception "08000": "connection_exception", "08003": "connection_does_not_exist", "08006": "connection_failure", "08001": "sqlclient_unable_to_establish_sqlconnection", "08004": "sqlserver_rejected_establishment_of_sqlconnection", "08007": "transaction_resolution_unknown", "08P01": "protocol_violation", // Class 09 - Triggered Action Exception "09000": "triggered_action_exception", // Class 0A - Feature Not Supported "0A000": "feature_not_supported", // Class 0B - Invalid Transaction Initiation "0B000": "invalid_transaction_initiation", // Class 0F - Locator Exception "0F000": "locator_exception", "0F001": "invalid_locator_specification", // Class 0L - Invalid Grantor "0L000": "invalid_grantor", "0LP01": "invalid_grant_operation", // Class 0P - Invalid Role Specification "0P000": "invalid_role_specification", // Class 0Z - Diagnostics Exception "0Z000": "diagnostics_exception", "0Z002": "stacked_diagnostics_accessed_without_active_handler", // Class 20 - Case Not Found "20000": "case_not_found", // Class 21 - Cardinality Violation "21000": "cardinality_violation", // Class 22 - Data Exception "22000": "data_exception", "2202E": "array_subscript_error", "22021": "character_not_in_repertoire", "22008": "datetime_field_overflow", "22012": "division_by_zero", "22005": "error_in_assignment", "2200B": "escape_character_conflict", "22022": "indicator_overflow", "22015": "interval_field_overflow", "2201E": "invalid_argument_for_logarithm", "22014": "invalid_argument_for_ntile_function", "22016": "invalid_argument_for_nth_value_function", "2201F": "invalid_argument_for_power_function", "2201G": "invalid_argument_for_width_bucket_function", "22018": "invalid_character_value_for_cast", "22007": "invalid_datetime_format", "22019": "invalid_escape_character", "2200D": "invalid_escape_octet", "22025": "invalid_escape_sequence", "22P06": "nonstandard_use_of_escape_character", "22010": "invalid_indicator_parameter_value", "22023": "invalid_parameter_value", "2201B": "invalid_regular_expression", "2201W": "invalid_row_count_in_limit_clause", "2201X": "invalid_row_count_in_result_offset_clause", "22009": "invalid_time_zone_displacement_value", "2200C": "invalid_use_of_escape_character", "2200G": "most_specific_type_mismatch", "22004": "null_value_not_allowed", "22002": "null_value_no_indicator_parameter", "22003": "numeric_value_out_of_range", 
"2200H": "sequence_generator_limit_exceeded", "22026": "string_data_length_mismatch", "22001": "string_data_right_truncation", "22011": "substring_error", "22027": "trim_error", "22024": "unterminated_c_string", "2200F": "zero_length_character_string", "22P01": "floating_point_exception", "22P02": "invalid_text_representation", "22P03": "invalid_binary_representation", "22P04": "bad_copy_file_format", "22P05": "untranslatable_character", "2200L": "not_an_xml_document", "2200M": "invalid_xml_document", "2200N": "invalid_xml_content", "2200S": "invalid_xml_comment", "2200T": "invalid_xml_processing_instruction", // Class 23 - Integrity Constraint Violation "23000": "integrity_constraint_violation", "23001": "restrict_violation", "23502": "not_null_violation", "23503": "foreign_key_violation", "23505": "unique_violation", "23514": "check_violation", "23P01": "exclusion_violation", // Class 24 - Invalid Cursor State "24000": "invalid_cursor_state", // Class 25 - Invalid Transaction State "25000": "invalid_transaction_state", "25001": "active_sql_transaction", "25002": "branch_transaction_already_active", "25008": "held_cursor_requires_same_isolation_level", "25003": "inappropriate_access_mode_for_branch_transaction", "25004": "inappropriate_isolation_level_for_branch_transaction", "25005": "no_active_sql_transaction_for_branch_transaction", "25006": "read_only_sql_transaction", "25007": "schema_and_data_statement_mixing_not_supported", "25P01": "no_active_sql_transaction", "25P02": "in_failed_sql_transaction", // Class 26 - Invalid SQL Statement Name "26000": "invalid_sql_statement_name", // Class 27 - Triggered Data Change Violation "27000": "triggered_data_change_violation", // Class 28 - Invalid Authorization Specification "28000": "invalid_authorization_specification", "28P01": "invalid_password", // Class 2B - Dependent Privilege Descriptors Still Exist "2B000": "dependent_privilege_descriptors_still_exist", "2BP01": "dependent_objects_still_exist", // Class 2D - Invalid Transaction Termination "2D000": "invalid_transaction_termination", // Class 2F - SQL Routine Exception "2F000": "sql_routine_exception", "2F005": "function_executed_no_return_statement", "2F002": "modifying_sql_data_not_permitted", "2F003": "prohibited_sql_statement_attempted", "2F004": "reading_sql_data_not_permitted", // Class 34 - Invalid Cursor Name "34000": "invalid_cursor_name", // Class 38 - External Routine Exception "38000": "external_routine_exception", "38001": "containing_sql_not_permitted", "38002": "modifying_sql_data_not_permitted", "38003": "prohibited_sql_statement_attempted", "38004": "reading_sql_data_not_permitted", // Class 39 - External Routine Invocation Exception "39000": "external_routine_invocation_exception", "39001": "invalid_sqlstate_returned", "39004": "null_value_not_allowed", "39P01": "trigger_protocol_violated", "39P02": "srf_protocol_violated", // Class 3B - Savepoint Exception "3B000": "savepoint_exception", "3B001": "invalid_savepoint_specification", // Class 3D - Invalid Catalog Name "3D000": "invalid_catalog_name", // Class 3F - Invalid Schema Name "3F000": "invalid_schema_name", // Class 40 - Transaction Rollback "40000": "transaction_rollback", "40002": "transaction_integrity_constraint_violation", "40001": "serialization_failure", "40003": "statement_completion_unknown", "40P01": "deadlock_detected", // Class 42 - Syntax Error or Access Rule Violation "42000": "syntax_error_or_access_rule_violation", "42601": "syntax_error", "42501": "insufficient_privilege", "42846": 
"cannot_coerce", "42803": "grouping_error", "42P20": "windowing_error", "42P19": "invalid_recursion", "42830": "invalid_foreign_key", "42602": "invalid_name", "42622": "name_too_long", "42939": "reserved_name", "42804": "datatype_mismatch", "42P18": "indeterminate_datatype", "42P21": "collation_mismatch", "42P22": "indeterminate_collation", "42809": "wrong_object_type", "42703": "undefined_column", "42883": "undefined_function", "42P01": "undefined_table", "42P02": "undefined_parameter", "42704": "undefined_object", "42701": "duplicate_column", "42P03": "duplicate_cursor", "42P04": "duplicate_database", "42723": "duplicate_function", "42P05": "duplicate_prepared_statement", "42P06": "duplicate_schema", "42P07": "duplicate_table", "42712": "duplicate_alias", "42710": "duplicate_object", "42702": "ambiguous_column", "42725": "ambiguous_function", "42P08": "ambiguous_parameter", "42P09": "ambiguous_alias", "42P10": "invalid_column_reference", "42611": "invalid_column_definition", "42P11": "invalid_cursor_definition", "42P12": "invalid_database_definition", "42P13": "invalid_function_definition", "42P14": "invalid_prepared_statement_definition", "42P15": "invalid_schema_definition", "42P16": "invalid_table_definition", "42P17": "invalid_object_definition", // Class 44 - WITH CHECK OPTION Violation "44000": "with_check_option_violation", // Class 53 - Insufficient Resources "53000": "insufficient_resources", "53100": "disk_full", "53200": "out_of_memory", "53300": "too_many_connections", "53400": "configuration_limit_exceeded", // Class 54 - Program Limit Exceeded "54000": "program_limit_exceeded", "54001": "statement_too_complex", "54011": "too_many_columns", "54023": "too_many_arguments", // Class 55 - Object Not In Prerequisite State "55000": "object_not_in_prerequisite_state", "55006": "object_in_use", "55P02": "cant_change_runtime_param", "55P03": "lock_not_available", // Class 57 - Operator Intervention "57000": "operator_intervention", "57014": "query_canceled", "57P01": "admin_shutdown", "57P02": "crash_shutdown", "57P03": "cannot_connect_now", "57P04": "database_dropped", // Class 58 - System Error (errors external to PostgreSQL itself) "58000": "system_error", "58030": "io_error", "58P01": "undefined_file", "58P02": "duplicate_file", // Class F0 - Configuration File Error "F0000": "config_file_error", "F0001": "lock_file_exists", // Class HV - Foreign Data Wrapper Error (SQL/MED) "HV000": "fdw_error", "HV005": "fdw_column_name_not_found", "HV002": "fdw_dynamic_parameter_value_needed", "HV010": "fdw_function_sequence_error", "HV021": "fdw_inconsistent_descriptor_information", "HV024": "fdw_invalid_attribute_value", "HV007": "fdw_invalid_column_name", "HV008": "fdw_invalid_column_number", "HV004": "fdw_invalid_data_type", "HV006": "fdw_invalid_data_type_descriptors", "HV091": "fdw_invalid_descriptor_field_identifier", "HV00B": "fdw_invalid_handle", "HV00C": "fdw_invalid_option_index", "HV00D": "fdw_invalid_option_name", "HV090": "fdw_invalid_string_length_or_buffer_length", "HV00A": "fdw_invalid_string_format", "HV009": "fdw_invalid_use_of_null_pointer", "HV014": "fdw_too_many_handles", "HV001": "fdw_out_of_memory", "HV00P": "fdw_no_schemas", "HV00J": "fdw_option_name_not_found", "HV00K": "fdw_reply_handle", "HV00Q": "fdw_schema_not_found", "HV00R": "fdw_table_not_found", "HV00L": "fdw_unable_to_create_execution", "HV00M": "fdw_unable_to_create_reply", "HV00N": "fdw_unable_to_establish_connection", // Class P0 - PL/pgSQL Error "P0000": "plpgsql_error", "P0001": "raise_exception", 
"P0002": "no_data_found", "P0003": "too_many_rows", // Class XX - Internal Error "XX000": "internal_error", "XX001": "data_corrupted", "XX002": "index_corrupted", } func parseError(r *readBuf) *Error { err := new(Error) for t := r.byte(); t != 0; t = r.byte() { msg := r.string() switch t { case 'S': err.Severity = msg case 'C': err.Code = ErrorCode(msg) case 'M': err.Message = msg case 'D': err.Detail = msg case 'H': err.Hint = msg case 'P': err.Position = msg case 'p': err.InternalPosition = msg case 'q': err.InternalQuery = msg case 'W': err.Where = msg case 's': err.Schema = msg case 't': err.Table = msg case 'c': err.Column = msg case 'd': err.DataTypeName = msg case 'n': err.Constraint = msg case 'F': err.File = msg case 'L': err.Line = msg case 'R': err.Routine = msg } } return err } // Fatal returns true if the Error Severity is fatal. func (err *Error) Fatal() bool { return err.Severity == Efatal } // SQLState returns the SQLState of the error. func (err *Error) SQLState() string { return string(err.Code) } // Get implements the legacy PGError interface. New code should use the fields // of the Error struct directly. func (err *Error) Get(k byte) (v string) { switch k { case 'S': return err.Severity case 'C': return string(err.Code) case 'M': return err.Message case 'D': return err.Detail case 'H': return err.Hint case 'P': return err.Position case 'p': return err.InternalPosition case 'q': return err.InternalQuery case 'W': return err.Where case 's': return err.Schema case 't': return err.Table case 'c': return err.Column case 'd': return err.DataTypeName case 'n': return err.Constraint case 'F': return err.File case 'L': return err.Line case 'R': return err.Routine } return "" } func (err *Error) Error() string { return "pq: " + err.Message } // PGError is an interface used by previous versions of pq. It is provided // only to support legacy code. New code should use the Error type. type PGError interface { Error() string Fatal() bool Get(k byte) (v string) } func errorf(s string, args ...interface{}) { panic(fmt.Errorf("pq: %s", fmt.Sprintf(s, args...))) } // TODO(ainar-g) Rename to errorf after removing panics. func fmterrorf(s string, args ...interface{}) error { return fmt.Errorf("pq: %s", fmt.Sprintf(s, args...)) } func errRecoverNoErrBadConn(err *error) { e := recover() if e == nil { // Do nothing return } var ok bool *err, ok = e.(error) if !ok { *err = fmt.Errorf("pq: unexpected error: %#v", e) } } func (cn *conn) errRecover(err *error) { e := recover() switch v := e.(type) { case nil: // Do nothing case runtime.Error: cn.err.set(driver.ErrBadConn) panic(v) case *Error: if v.Fatal() { *err = driver.ErrBadConn } else { *err = v } case *net.OpError: cn.err.set(driver.ErrBadConn) *err = v case *safeRetryError: cn.err.set(driver.ErrBadConn) *err = driver.ErrBadConn case error: if v == io.EOF || v.Error() == "remote error: handshake failure" { *err = driver.ErrBadConn } else { *err = v } default: cn.err.set(driver.ErrBadConn) panic(fmt.Sprintf("unknown error: %#v", e)) } // Any time we return ErrBadConn, we need to remember it since *Tx doesn't // mark the connection bad in database/sql. 
if *err == driver.ErrBadConn { cn.err.set(driver.ErrBadConn) } } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/ssl_permissions_test.go0000644000000000000000000000364415024302467024560 0ustar rootroot//go:build !windows // +build !windows package pq import ( "os" "syscall" "testing" "time" ) type stat_t_wrapper struct { stat syscall.Stat_t } func (stat_t *stat_t_wrapper) Name() string { return "pem.key" } func (stat_t *stat_t_wrapper) Size() int64 { return int64(100) } func (stat_t *stat_t_wrapper) Mode() os.FileMode { return os.FileMode(stat_t.stat.Mode) } func (stat_t *stat_t_wrapper) ModTime() time.Time { return time.Now() } func (stat_t *stat_t_wrapper) IsDir() bool { return true } func (stat_t *stat_t_wrapper) Sys() interface{} { return &stat_t.stat } func TestHasCorrectRootGroupPermissions(t *testing.T) { currentUID := uint32(os.Getuid()) currentGID := uint32(os.Getgid()) testData := []struct { expectedError error stat syscall.Stat_t }{ { expectedError: nil, stat: syscall.Stat_t{ Mode: 0600, Uid: currentUID, Gid: currentGID, }, }, { expectedError: nil, stat: syscall.Stat_t{ Mode: 0640, Uid: 0, Gid: currentGID, }, }, { expectedError: errSSLKeyHasUnacceptableUserPermissions, stat: syscall.Stat_t{ Mode: 0666, Uid: currentUID, Gid: currentGID, }, }, { expectedError: errSSLKeyHasUnacceptableRootPermissions, stat: syscall.Stat_t{ Mode: 0666, Uid: 0, Gid: currentGID, }, }, } for _, test := range testData { wrapper := &stat_t_wrapper{ stat: test.stat, } if test.expectedError != hasCorrectPermissions(wrapper) { if test.expectedError == nil { t.Errorf( "file owned by %d:%d with %s should not have failed check with error \"%s\"", test.stat.Uid, test.stat.Gid, wrapper.Mode(), hasCorrectPermissions(wrapper), ) continue } t.Errorf( "file owned by %d:%d with %s, expected \"%s\", got \"%s\"", test.stat.Uid, test.stat.Gid, wrapper.Mode(), test.expectedError, hasCorrectPermissions(wrapper), ) } } } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/rows_test.go0000644000000000000000000001115415024302467022311 0ustar rootrootpackage pq import ( "math" "reflect" "testing" "github.com/lib/pq/oid" ) func TestDataTypeName(t *testing.T) { tts := []struct { typ oid.Oid name string }{ {oid.T_int8, "INT8"}, {oid.T_int4, "INT4"}, {oid.T_int2, "INT2"}, {oid.T_varchar, "VARCHAR"}, {oid.T_text, "TEXT"}, {oid.T_bool, "BOOL"}, {oid.T_numeric, "NUMERIC"}, {oid.T_date, "DATE"}, {oid.T_time, "TIME"}, {oid.T_timetz, "TIMETZ"}, {oid.T_timestamp, "TIMESTAMP"}, {oid.T_timestamptz, "TIMESTAMPTZ"}, {oid.T_bytea, "BYTEA"}, } for i, tt := range tts { dt := fieldDesc{OID: tt.typ} if name := dt.Name(); name != tt.name { t.Errorf("(%d) got: %s want: %s", i, name, tt.name) } } } func TestDataType(t *testing.T) { tts := []struct { typ oid.Oid kind reflect.Kind }{ {oid.T_int8, reflect.Int64}, {oid.T_int4, reflect.Int32}, {oid.T_int2, reflect.Int16}, {oid.T_varchar, reflect.String}, {oid.T_text, reflect.String}, {oid.T_bool, reflect.Bool}, {oid.T_date, reflect.Struct}, {oid.T_time, reflect.Struct}, {oid.T_timetz, reflect.Struct}, {oid.T_timestamp, reflect.Struct}, {oid.T_timestamptz, reflect.Struct}, {oid.T_bytea, reflect.Slice}, } for i, tt := range tts { dt := fieldDesc{OID: tt.typ} if kind := dt.Type().Kind(); kind != tt.kind { t.Errorf("(%d) got: %s want: %s", i, kind, tt.kind) } } } func TestDataTypeLength(t *testing.T) { tts := []struct { typ oid.Oid len int mod int length int64 ok bool }{ {oid.T_int4, 0, -1, 0, false}, {oid.T_varchar, 65535, 9, 5, true}, {oid.T_text, 65535, -1, math.MaxInt64, true}, {oid.T_bytea, 65535, -1, 
math.MaxInt64, true}, } for i, tt := range tts { dt := fieldDesc{OID: tt.typ, Len: tt.len, Mod: tt.mod} if l, k := dt.Length(); k != tt.ok || l != tt.length { t.Errorf("(%d) got: %d, %t want: %d, %t", i, l, k, tt.length, tt.ok) } } } func TestDataTypePrecisionScale(t *testing.T) { tts := []struct { typ oid.Oid mod int precision, scale int64 ok bool }{ {oid.T_int4, -1, 0, 0, false}, {oid.T_numeric, 589830, 9, 2, true}, {oid.T_text, -1, 0, 0, false}, } for i, tt := range tts { dt := fieldDesc{OID: tt.typ, Mod: tt.mod} p, s, k := dt.PrecisionScale() if k != tt.ok { t.Errorf("(%d) got: %t want: %t", i, k, tt.ok) } if p != tt.precision { t.Errorf("(%d) wrong precision got: %d want: %d", i, p, tt.precision) } if s != tt.scale { t.Errorf("(%d) wrong scale got: %d want: %d", i, s, tt.scale) } } } func TestRowsColumnTypes(t *testing.T) { columnTypesTests := []struct { Name string TypeName string Length struct { Len int64 OK bool } DecimalSize struct { Precision int64 Scale int64 OK bool } ScanType reflect.Type }{ { Name: "a", TypeName: "INT4", Length: struct { Len int64 OK bool }{ Len: 0, OK: false, }, DecimalSize: struct { Precision int64 Scale int64 OK bool }{ Precision: 0, Scale: 0, OK: false, }, ScanType: reflect.TypeOf(int32(0)), }, { Name: "bar", TypeName: "TEXT", Length: struct { Len int64 OK bool }{ Len: math.MaxInt64, OK: true, }, DecimalSize: struct { Precision int64 Scale int64 OK bool }{ Precision: 0, Scale: 0, OK: false, }, ScanType: reflect.TypeOf(""), }, } db := openTestConn(t) defer db.Close() rows, err := db.Query("SELECT 1 AS a, text 'bar' AS bar, 1.28::numeric(9, 2) AS dec") if err != nil { t.Fatal(err) } columns, err := rows.ColumnTypes() if err != nil { t.Fatal(err) } if len(columns) != 3 { t.Errorf("expected 3 columns found %d", len(columns)) } for i, tt := range columnTypesTests { c := columns[i] if c.Name() != tt.Name { t.Errorf("(%d) got: %s, want: %s", i, c.Name(), tt.Name) } if c.DatabaseTypeName() != tt.TypeName { t.Errorf("(%d) got: %s, want: %s", i, c.DatabaseTypeName(), tt.TypeName) } l, ok := c.Length() if l != tt.Length.Len { t.Errorf("(%d) got: %d, want: %d", i, l, tt.Length.Len) } if ok != tt.Length.OK { t.Errorf("(%d) got: %t, want: %t", i, ok, tt.Length.OK) } p, s, ok := c.DecimalSize() if p != tt.DecimalSize.Precision { t.Errorf("(%d) got: %d, want: %d", i, p, tt.DecimalSize.Precision) } if s != tt.DecimalSize.Scale { t.Errorf("(%d) got: %d, want: %d", i, s, tt.DecimalSize.Scale) } if ok != tt.DecimalSize.OK { t.Errorf("(%d) got: %t, want: %t", i, ok, tt.DecimalSize.OK) } if c.ScanType() != tt.ScanType { t.Errorf("(%d) got: %v, want: %v", i, c.ScanType(), tt.ScanType) } } } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/.github/0000755000000000000000000000000015024302467021267 5ustar rootrootdependencies/pkg/mod/github.com/lib/pq@v1.10.9/.github/workflows/0000755000000000000000000000000015024302467023324 5ustar rootrootdependencies/pkg/mod/github.com/lib/pq@v1.10.9/.github/workflows/codeql-analysis.yml0000644000000000000000000000067215024302467027144 0ustar rootrootname: "CodeQL" on: [push, pull_request] jobs: analyze: name: Analyze runs-on: ubuntu-latest permissions: actions: read contents: read security-events: write steps: - name: Checkout repo uses: actions/checkout@v2 - name: Initialize CodeQL uses: github/codeql-action/init@v1 with: languages: 'go' - name: CodeQL Analysis uses: github/codeql-action/analyze@v1 dependencies/pkg/mod/github.com/lib/pq@v1.10.9/.github/workflows/test.yml0000644000000000000000000002135415024302467025033 0ustar rootrootname: Test 
on: [push, pull_request] jobs: test: runs-on: ubuntu-latest strategy: fail-fast: false matrix: postgres: - '13' - '12' - '11' - '10' - '9.6' go: - '1.17' - '1.16' - '1.15' - '1.14' steps: - name: setup postgres pre-reqs run: | mkdir init cat < init/root.crt -----BEGIN CERTIFICATE----- MIIEBjCCAu6gAwIBAgIJAPizR+OD14YnMA0GCSqGSIb3DQEBCwUAMF4xCzAJBgNV BAYTAlVTMQ8wDQYDVQQIDAZOZXZhZGExEjAQBgNVBAcMCUxhcyBWZWdhczEaMBgG A1UECgwRZ2l0aHViLmNvbS9saWIvcHExDjAMBgNVBAMMBXBxIENBMB4XDTIxMDkw MjAxNTUwMloXDTMxMDkwMzAxNTUwMlowXjELMAkGA1UEBhMCVVMxDzANBgNVBAgM Bk5ldmFkYTESMBAGA1UEBwwJTGFzIFZlZ2FzMRowGAYDVQQKDBFnaXRodWIuY29t L2xpYi9wcTEOMAwGA1UEAwwFcHEgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw ggEKAoIBAQDb9d6sjdU6GdibGrXRMOHREH3MRUS8T4TFqGgPEGVDP/V5bAZlBSGP AN0o9DTyVLcbQpBt8zMTw9KeIzIIe5NIVkSmA16lw/YckGhOM+kZIkiDuE6qt5Ia OQCRMdXkZ8ejG/JUu+rHU8FJZL8DE+jyYherzdjkeVAQ7JfzxAwW2Dl7T/47g337 Pwmf17AEb8ibSqmXyUN7R5NhJQs+hvaYdNagzdx91E1H+qlyBvmiNeasUQljLvZ+ Y8wAuU79neA+d09O4PBiYwV17rSP6SZCeGE3oLZviL/0KM9Xig88oB+2FmvQ6Zxa L7SoBlqS+5pBZwpH7eee/wCIKAnJtMAJAgMBAAGjgcYwgcMwDwYDVR0TAQH/BAUw AwEB/zAdBgNVHQ4EFgQUfIXEczahbcM2cFrwclJF7GbdajkwgZAGA1UdIwSBiDCB hYAUfIXEczahbcM2cFrwclJF7GbdajmhYqRgMF4xCzAJBgNVBAYTAlVTMQ8wDQYD VQQIDAZOZXZhZGExEjAQBgNVBAcMCUxhcyBWZWdhczEaMBgGA1UECgwRZ2l0aHVi LmNvbS9saWIvcHExDjAMBgNVBAMMBXBxIENBggkA+LNH44PXhicwDQYJKoZIhvcN AQELBQADggEBABFyGgSz2mHVJqYgX1Y+7P+MfKt83cV2uYDGYvXrLG2OGiCilVul oTBG+8omIMSHOsQZvWMpA5H0tnnlQHrKpKpUyKkSL+Wv5GL0UtBmHX7mVRiaK2l4 q2BjRaQUitp/FH4NSdXtVrMME5T1JBBZHsQkNL3cNRzRKwY/Vj5UGEDxDS7lILUC e01L4oaK0iKQn4beALU+TvKoAHdPvoxpPpnhkF5ss9HmdcvRktJrKZemDJZswZ7/ +omx8ZPIYYUH5VJJYYE88S7guAt+ZaKIUlel/t6xPbo2ZySFSg9u1uB99n+jTo3L 1rAxFnN3FCX2jBqgP29xMVmisaN5k04UmyI= -----END CERTIFICATE----- CONF cat < init/server.crt -----BEGIN CERTIFICATE----- MIIDqzCCApOgAwIBAgIJAPiewLrOyYipMA0GCSqGSIb3DQEBCwUAMF4xCzAJBgNV BAYTAlVTMQ8wDQYDVQQIDAZOZXZhZGExEjAQBgNVBAcMCUxhcyBWZWdhczEaMBgG A1UECgwRZ2l0aHViLmNvbS9saWIvcHExDjAMBgNVBAMMBXBxIENBMB4XDTIxMDkw MjAxNTUwMloXDTMxMDkwMzAxNTUwMlowTjELMAkGA1UEBhMCVVMxDzANBgNVBAgM Bk5ldmFkYTESMBAGA1UEBwwJTGFzIFZlZ2FzMRowGAYDVQQKDBFnaXRodWIuY29t L2xpYi9wcTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKf6H4UzmANN QiQJe92Mf3ETMYmpZKNNO9DPEHyNLIkag+XwMrBTdcCK0mLvsNCYpXuBN6703KCd WAFOeMmj7gOsWtvjt5Xm6bRHLgegekXzcG/jDwq/wyzeDzr/YkITuIlG44Lf9lhY FLwiHlHOWHnwrZaEh6aU//02aQkzyX5INeXl/3TZm2G2eIH6AOxOKOU27MUsyVSQ 5DE+SDKGcRP4bElueeQWvxAXNMZYb7sVSDdfHI3zr32K4k/tC8x0fZJ5XN/dvl4t 4N4MrYlmDO5XOrb/gQH1H4iu6+5EMDfZYab4fkThnNFdfFqu4/8Scv7KZ8mWqpKM fGAjEPctQi0CAwEAAaN8MHowHQYDVR0OBBYEFENExPbmDyFB2AJUdbMvVyhlNPD5 MAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMBMGA1UdEQQMMAqCCHBvc3RncmVzMCwG CWCGSAGG+EIBDQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTANBgkq hkiG9w0BAQsFAAOCAQEAMRVbV8RiEsmp9HAtnVCZmRXMIbgPGrqjeSwk586s4K8v BSqNCqxv6s5GfCRmDYiqSqeuCVDtUJS1HsTmbxVV7Ke71WMo+xHR1ICGKOa8WGCb TGsuicG5QZXWaxeMOg4s0qpKmKko0d1aErdVsanU5dkrVS7D6729Ffnzu4lwApk6 invAB67p8u7sojwqRq5ce0vRaG+YFylTrWomF9kauEb8gKbQ9Xc7QfX+h+UH/mq9 Nvdj8LOHp6/82bZdnsYUOtV4lS1IA/qzeXpqBphxqfWabD1yLtkyJyImZKq8uIPp 0CG4jhObPdWcCkXD6bg3QK3mhwlC79OtFgxWmldCRQ== -----END CERTIFICATE----- CONF cat < init/server.key -----BEGIN PRIVATE KEY----- MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCn+h+FM5gDTUIk CXvdjH9xEzGJqWSjTTvQzxB8jSyJGoPl8DKwU3XAitJi77DQmKV7gTeu9NygnVgB TnjJo+4DrFrb47eV5um0Ry4HoHpF83Bv4w8Kv8Ms3g86/2JCE7iJRuOC3/ZYWBS8 Ih5Rzlh58K2WhIemlP/9NmkJM8l+SDXl5f902ZthtniB+gDsTijlNuzFLMlUkOQx PkgyhnET+GxJbnnkFr8QFzTGWG+7FUg3XxyN8699iuJP7QvMdH2SeVzf3b5eLeDe DK2JZgzuVzq2/4EB9R+IruvuRDA32WGm+H5E4ZzRXXxaruP/EnL+ymfJlqqSjHxg 
IxD3LUItAgMBAAECggEAOE2naQ9tIZYw2EFxikZApVcooJrtx6ropMnzHbx4NBB2 K4mChAXFj184u77ZxmGT/jzGvFcI6LE0wWNbK0NOUV7hKZk/fPhkV3AQZrAMrAu4 IVi7PwAd3JkmA8F8XuebUDA5rDGDsgL8GD9baFJA58abeLs9eMGyuF4XgOUh4bip hgHa76O2rcDWNY5HZqqRslw75FzlYkB0PCts/UJxSswj70kTTihyOhDlrm2TnyxI ne54UbGRrpfs9wiheSGLjDG81qZToBHQDwoAnjjZhu1VCaBISuGbgZrxyyRyqdnn xPW+KczMv04XyvF7v6Pz+bUEppalLXGiXnH5UtWvZQKBgQDTPCdMpNE/hwlq4nAw Kf42zIBWfbnMLVWYoeDiAOhtl9XAUAXn76xe6Rvo0qeAo67yejdbJfRq3HvGyw+q 4PS8r9gXYmLYIPQxSoLL5+rFoBCN3qFippfjLB1j32mp7+15KjRj8FF2r6xIN8fu XatSRsaqmvCWYLDRv/rbHnxwkwKBgQDLkyfFLF7BtwtPWKdqrwOM7ip1UKh+oDBS vkCQ08aEFRBU7T3jChsx5GbaW6zmsSBwBwcrHclpSkz7n3aq19DDWObJR2p80Fma rsXeIcvtEpkvT3pVX268P5d+XGs1kxgFunqTysG9yChW+xzcs5MdKBzuMPPn7rL8 MKAzdar6PwKBgEypkzW8x3h/4Moa3k6MnwdyVs2NGaZheaRIc95yJ+jGZzxBjrMr h+p2PbvU4BfO0AqOkpKRBtDVrlJqlggVVp04UHvEKE16QEW3Xhr0037f5cInX3j3 Lz6yXwRFLAsR2aTUzWjL6jTh8uvO2s/GzQuyRh3a16Ar/WBShY+K0+zjAoGATnLT xZjWnyHRmu8X/PWakamJ9RFzDPDgDlLAgM8LVgTj+UY/LgnL9wsEU6s2UuP5ExKy QXxGDGwUhHar/SQTj+Pnc7Mwpw6HKSOmnnY5po8fNusSwml3O9XppEkrC0c236Y/ 7EobJO5IFVTJh4cv7vFxTJzSsRL8KFD4uzvh+nMCgYEAqY8NBYtIgNJA2B6C6hHF +bG7v46434ZHFfGTmMQwzE4taVg7YRnzYESAlvK4bAP5ZXR90n7GRGFhrXzoMZ38 r0bw/q9rV+ReGda7/Bjf7ciCKiq0RODcHtf4IaskjPXCoQRGJtgCPLhWPfld6g9v /HTvO96xv9e3eG/PKSPog94= -----END PRIVATE KEY----- CONF cat < init/hba.sh cat < /var/lib/postgresql/data/pg_hba.conf local all all trust host all postgres all trust hostnossl all pqgossltest all reject hostnossl all pqgosslcert all reject hostssl all pqgossltest all trust hostssl all pqgosslcert all cert host all all all trust EOF CONF sudo chown 999:999 ./init/* sudo chmod 600 ./init/* - name: start postgres run: | docker run -d \ --name pg \ -p 5432:5432 \ -v $(pwd)/init:/init \ -e POSTGRES_PASSWORD=unused \ -e POSTGRES_USER=postgres \ postgres:${{ matrix.postgres }} \ -c ssl=on \ -c ssl_ca_file=/init/root.crt \ -c ssl_cert_file=/init/server.crt \ -c ssl_key_file=/init/server.key - name: configure postgres run: | n=0 until [ "$n" -ge 10 ] do docker exec pg pg_isready -h localhost && break n=$((n+1)) echo waiting for postgres to be ready... sleep 1 done docker exec pg bash /init/hba.sh n=0 until [ "$n" -ge 10 ] do docker exec pg su postgres -c '/usr/lib/postgresql/${{ matrix.postgres }}/bin/pg_ctl reload' && break n=$((n+1)) echo waiting for postgres to reload... sleep 1 done - name: setup hosts run: echo '127.0.0.1 postgres' | sudo tee -a /etc/hosts - name: create db/roles run: | n=0 until [ "$n" -ge 10 ] do docker exec pg pg_isready -h localhost && break n=$((n+1)) echo waiting for postgres to be ready... sleep 1 done docker exec pg createdb -h localhost -U postgres pqgotest docker exec pg createuser -h localhost -U postgres -DRS pqgossltest docker exec pg createuser -h localhost -U postgres -DRS pqgosslcert - name: check out code into the Go module directory uses: actions/checkout@v2 - name: set up go uses: actions/setup-go@v2 with: go-version: ${{ matrix.go }} id: go - name: set key perms run: sudo chmod 600 certs/postgresql.key - name: run tests env: PGUSER: postgres PGHOST: localhost PGPORT: 5432 PQGOSSLTESTS: 1 PQSSLCERTTEST_PATH: certs run: | PQTEST_BINARY_PARAMETERS=no go test -race -v ./... PQTEST_BINARY_PARAMETERS=yes go test -race -v ./... - name: install goimports run: go get golang.org/x/tools/cmd/goimports - name: install staticcheck run: | wget https://github.com/dominikh/go-tools/releases/latest/download/staticcheck_linux_amd64.tar.gz -O - | tar -xz staticcheck - name: run goimports run: | goimports -d -e . | awk '{ print } END { exit NR == 0 ? 
0 : 1 }' - name: run staticcheck run: ./staticcheck/staticcheck -go 1.13 ./... - name: build run: go build -v . dependencies/pkg/mod/github.com/lib/pq@v1.10.9/uuid.go0000644000000000000000000000105315024302467021223 0ustar rootrootpackage pq import ( "encoding/hex" "fmt" ) // decodeUUIDBinary interprets the binary format of a uuid, returning it in text format. func decodeUUIDBinary(src []byte) ([]byte, error) { if len(src) != 16 { return nil, fmt.Errorf("pq: unable to decode uuid; bad length: %d", len(src)) } dst := make([]byte, 36) dst[8], dst[13], dst[18], dst[23] = '-', '-', '-', '-' hex.Encode(dst[0:], src[0:4]) hex.Encode(dst[9:], src[4:6]) hex.Encode(dst[14:], src[6:8]) hex.Encode(dst[19:], src[8:10]) hex.Encode(dst[24:], src[10:16]) return dst, nil } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/notice.go0000644000000000000000000000477415024302467021553 0ustar rootroot//go:build go1.10 // +build go1.10 package pq import ( "context" "database/sql/driver" ) // NoticeHandler returns the notice handler on the given connection, if any. A // runtime panic occurs if c is not a pq connection. This is rarely used // directly, use ConnectorNoticeHandler and ConnectorWithNoticeHandler instead. func NoticeHandler(c driver.Conn) func(*Error) { return c.(*conn).noticeHandler } // SetNoticeHandler sets the given notice handler on the given connection. A // runtime panic occurs if c is not a pq connection. A nil handler may be used // to unset it. This is rarely used directly, use ConnectorNoticeHandler and // ConnectorWithNoticeHandler instead. // // Note: Notice handlers are executed synchronously by pq meaning commands // won't continue to be processed until the handler returns. func SetNoticeHandler(c driver.Conn, handler func(*Error)) { c.(*conn).noticeHandler = handler } // NoticeHandlerConnector wraps a regular connector and sets a notice handler // on it. type NoticeHandlerConnector struct { driver.Connector noticeHandler func(*Error) } // Connect calls the underlying connector's connect method and then sets the // notice handler. func (n *NoticeHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) { c, err := n.Connector.Connect(ctx) if err == nil { SetNoticeHandler(c, n.noticeHandler) } return c, err } // ConnectorNoticeHandler returns the currently set notice handler, if any. If // the given connector is not a result of ConnectorWithNoticeHandler, nil is // returned. func ConnectorNoticeHandler(c driver.Connector) func(*Error) { if c, ok := c.(*NoticeHandlerConnector); ok { return c.noticeHandler } return nil } // ConnectorWithNoticeHandler creates or sets the given handler for the given // connector. If the given connector is a result of calling this function // previously, it is simply set on the given connector and returned. Otherwise, // this returns a new connector wrapping the given one and setting the notice // handler. A nil notice handler may be used to unset it. // // The returned connector is intended to be used with database/sql.OpenDB. // // Note: Notice handlers are executed synchronously by pq meaning commands // won't continue to be processed until the handler returns. 
func ConnectorWithNoticeHandler(c driver.Connector, handler func(*Error)) *NoticeHandlerConnector { if c, ok := c.(*NoticeHandlerConnector); ok { c.noticeHandler = handler return c } return &NoticeHandlerConnector{Connector: c, noticeHandler: handler} } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/connector_example_test.go0000644000000000000000000000061115024302467025020 0ustar rootroot//go:build go1.10 // +build go1.10 package pq_test import ( "database/sql" "fmt" "github.com/lib/pq" ) func ExampleNewConnector() { name := "" connector, err := pq.NewConnector(name) if err != nil { fmt.Println(err) return } db := sql.OpenDB(connector) defer db.Close() // Use the DB txn, err := db.Begin() if err != nil { fmt.Println(err) return } txn.Rollback() } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/user_posix.go0000644000000000000000000000110315024302467022451 0ustar rootroot// Package pq is a pure Go Postgres driver for the database/sql package. //go:build aix || darwin || dragonfly || freebsd || (linux && !android) || nacl || netbsd || openbsd || plan9 || solaris || rumprun || illumos // +build aix darwin dragonfly freebsd linux,!android nacl netbsd openbsd plan9 solaris rumprun illumos package pq import ( "os" "os/user" ) func userCurrent() (string, error) { u, err := user.Current() if err == nil { return u.Username, nil } name := os.Getenv("USER") if name != "" { return name, nil } return "", ErrCouldNotDetectUsername } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/hstore/0000755000000000000000000000000015024302467021233 5ustar rootrootdependencies/pkg/mod/github.com/lib/pq@v1.10.9/hstore/hstore.go0000644000000000000000000000510515024302467023067 0ustar rootrootpackage hstore import ( "database/sql" "database/sql/driver" "strings" ) // Hstore is a wrapper for transferring Hstore values back and forth easily. type Hstore struct { Map map[string]sql.NullString } // escapes and quotes hstore keys/values // s should be a sql.NullString or string func hQuote(s interface{}) string { var str string switch v := s.(type) { case sql.NullString: if !v.Valid { return "NULL" } str = v.String case string: str = v default: panic("not a string or sql.NullString") } str = strings.Replace(str, "\\", "\\\\", -1) return `"` + strings.Replace(str, "\"", "\\\"", -1) + `"` } // Scan implements the Scanner interface. // // Note h.Map is reallocated before the scan to clear existing values. If the // hstore column's database value is NULL, then h.Map is set to nil instead. 
func (h *Hstore) Scan(value interface{}) error { if value == nil { h.Map = nil return nil } h.Map = make(map[string]sql.NullString) var b byte pair := [][]byte{{}, {}} pi := 0 inQuote := false didQuote := false sawSlash := false bindex := 0 for bindex, b = range value.([]byte) { if sawSlash { pair[pi] = append(pair[pi], b) sawSlash = false continue } switch b { case '\\': sawSlash = true continue case '"': inQuote = !inQuote if !didQuote { didQuote = true } continue default: if !inQuote { switch b { case ' ', '\t', '\n', '\r': continue case '=': continue case '>': pi = 1 didQuote = false continue case ',': s := string(pair[1]) if !didQuote && len(s) == 4 && strings.ToLower(s) == "null" { h.Map[string(pair[0])] = sql.NullString{String: "", Valid: false} } else { h.Map[string(pair[0])] = sql.NullString{String: string(pair[1]), Valid: true} } pair[0] = []byte{} pair[1] = []byte{} pi = 0 continue } } } pair[pi] = append(pair[pi], b) } if bindex > 0 { s := string(pair[1]) if !didQuote && len(s) == 4 && strings.ToLower(s) == "null" { h.Map[string(pair[0])] = sql.NullString{String: "", Valid: false} } else { h.Map[string(pair[0])] = sql.NullString{String: string(pair[1]), Valid: true} } } return nil } // Value implements the driver Valuer interface. Note if h.Map is nil, the // database column value will be set to NULL. func (h Hstore) Value() (driver.Value, error) { if h.Map == nil { return nil, nil } parts := []string{} for key, val := range h.Map { thispart := hQuote(key) + "=>" + hQuote(val) parts = append(parts, thispart) } return []byte(strings.Join(parts, ",")), nil } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/hstore/hstore_test.go0000644000000000000000000000740615024302467024134 0ustar rootrootpackage hstore import ( "database/sql" "os" "testing" _ "github.com/lib/pq" ) type Fatalistic interface { Fatal(args ...interface{}) } func openTestConn(t Fatalistic) *sql.DB { datname := os.Getenv("PGDATABASE") sslmode := os.Getenv("PGSSLMODE") if datname == "" { os.Setenv("PGDATABASE", "pqgotest") } if sslmode == "" { os.Setenv("PGSSLMODE", "disable") } conn, err := sql.Open("postgres", "") if err != nil { t.Fatal(err) } return conn } func TestHstore(t *testing.T) { db := openTestConn(t) defer db.Close() // quietly create hstore if it doesn't exist _, err := db.Exec("CREATE EXTENSION IF NOT EXISTS hstore") if err != nil { t.Skipf("Skipping hstore tests - hstore extension create failed: %s", err.Error()) } hs := Hstore{} // test for null-valued hstores err = db.QueryRow("SELECT NULL::hstore").Scan(&hs) if err != nil { t.Fatal(err) } if hs.Map != nil { t.Fatalf("expected null map") } err = db.QueryRow("SELECT $1::hstore", hs).Scan(&hs) if err != nil { t.Fatalf("re-query null map failed: %s", err.Error()) } if hs.Map != nil { t.Fatalf("expected null map") } // test for empty hstores err = db.QueryRow("SELECT ''::hstore").Scan(&hs) if err != nil { t.Fatal(err) } if hs.Map == nil { t.Fatalf("expected empty map, got null map") } if len(hs.Map) != 0 { t.Fatalf("expected empty map, got len(map)=%d", len(hs.Map)) } err = db.QueryRow("SELECT $1::hstore", hs).Scan(&hs) if err != nil { t.Fatalf("re-query empty map failed: %s", err.Error()) } if hs.Map == nil { t.Fatalf("expected empty map, got null map") } if len(hs.Map) != 0 { t.Fatalf("expected empty map, got len(map)=%d", len(hs.Map)) } // a few example maps to test out hsOnePair := Hstore{ Map: map[string]sql.NullString{ "key1": {String: "value1", Valid: true}, }, } hsThreePairs := Hstore{ Map: map[string]sql.NullString{ "key1": {String: "value1", 
Valid: true}, "key2": {String: "value2", Valid: true}, "key3": {String: "value3", Valid: true}, }, } hsSmorgasbord := Hstore{ Map: map[string]sql.NullString{ "nullstring": {String: "NULL", Valid: true}, "actuallynull": {String: "", Valid: false}, "NULL": {String: "NULL string key", Valid: true}, "withbracket": {String: "value>42", Valid: true}, "withequal": {String: "value=42", Valid: true}, `"withquotes1"`: {String: `this "should" be fine`, Valid: true}, `"withquotes"2"`: {String: `this "should\" also be fine`, Valid: true}, "embedded1": {String: "value1=>x1", Valid: true}, "embedded2": {String: `"value2"=>x2`, Valid: true}, "withnewlines": {String: "\n\nvalue\t=>2", Valid: true}, "<>": {String: `this, "should,\" also, => be fine`, Valid: true}, }, } // test encoding in query params, then decoding during Scan testBidirectional := func(h Hstore) { err = db.QueryRow("SELECT $1::hstore", h).Scan(&hs) if err != nil { t.Fatalf("re-query %d-pair map failed: %s", len(h.Map), err.Error()) } if hs.Map == nil { t.Fatalf("expected %d-pair map, got null map", len(h.Map)) } if len(hs.Map) != len(h.Map) { t.Fatalf("expected %d-pair map, got len(map)=%d", len(h.Map), len(hs.Map)) } for key, val := range hs.Map { otherval, found := h.Map[key] if !found { t.Fatalf(" key '%v' not found in %d-pair map", key, len(h.Map)) } if otherval.Valid != val.Valid { t.Fatalf(" value %v <> %v in %d-pair map", otherval, val, len(h.Map)) } if otherval.String != val.String { t.Fatalf(" value '%v' <> '%v' in %d-pair map", otherval.String, val.String, len(h.Map)) } } } testBidirectional(hsOnePair) testBidirectional(hsThreePairs) testBidirectional(hsSmorgasbord) } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/connector.go0000644000000000000000000000711215024302467022251 0ustar rootrootpackage pq import ( "context" "database/sql/driver" "errors" "fmt" "os" "strings" ) // Connector represents a fixed configuration for the pq driver with a given // name. Connector satisfies the database/sql/driver Connector interface and // can be used to create any number of DB Conn's via the database/sql OpenDB // function. // // See https://golang.org/pkg/database/sql/driver/#Connector. // See https://golang.org/pkg/database/sql/#OpenDB. type Connector struct { opts values dialer Dialer } // Connect returns a connection to the database using the fixed configuration // of this Connector. Context is not used. func (c *Connector) Connect(ctx context.Context) (driver.Conn, error) { return c.open(ctx) } // Dialer allows change the dialer used to open connections. func (c *Connector) Dialer(dialer Dialer) { c.dialer = dialer } // Driver returns the underlying driver of this Connector. func (c *Connector) Driver() driver.Driver { return &Driver{} } // NewConnector returns a connector for the pq driver in a fixed configuration // with the given dsn. The returned connector can be used to create any number // of equivalent Conn's. The returned connector is intended to be used with // database/sql.OpenDB. // // See https://golang.org/pkg/database/sql/driver/#Connector. // See https://golang.org/pkg/database/sql/#OpenDB. func NewConnector(dsn string) (*Connector, error) { var err error o := make(values) // A number of defaults are applied here, in this order: // // * Very low precedence defaults applied in every situation // * Environment variables // * Explicitly passed connection information o["host"] = "localhost" o["port"] = "5432" // N.B.: Extra float digits should be set to 3, but that breaks // Postgres 8.4 and older, where the max is 2. 
o["extra_float_digits"] = "2" for k, v := range parseEnviron(os.Environ()) { o[k] = v } if strings.HasPrefix(dsn, "postgres://") || strings.HasPrefix(dsn, "postgresql://") { dsn, err = ParseURL(dsn) if err != nil { return nil, err } } if err := parseOpts(dsn, o); err != nil { return nil, err } // Use the "fallback" application name if necessary if fallback, ok := o["fallback_application_name"]; ok { if _, ok := o["application_name"]; !ok { o["application_name"] = fallback } } // We can't work with any client_encoding other than UTF-8 currently. // However, we have historically allowed the user to set it to UTF-8 // explicitly, and there's no reason to break such programs, so allow that. // Note that the "options" setting could also set client_encoding, but // parsing its value is not worth it. Instead, we always explicitly send // client_encoding as a separate run-time parameter, which should override // anything set in options. if enc, ok := o["client_encoding"]; ok && !isUTF8(enc) { return nil, errors.New("client_encoding must be absent or 'UTF8'") } o["client_encoding"] = "UTF8" // DateStyle needs a similar treatment. if datestyle, ok := o["datestyle"]; ok { if datestyle != "ISO, MDY" { return nil, fmt.Errorf("setting datestyle must be absent or %v; got %v", "ISO, MDY", datestyle) } } else { o["datestyle"] = "ISO, MDY" } // If a user is not provided by any other means, the last // resort is to use the current operating system provided user // name. if _, ok := o["user"]; !ok { u, err := userCurrent() if err != nil { return nil, err } o["user"] = u } // SSL is not necessary or supported over UNIX domain sockets if network, _ := network(o); network == "unix" { o["sslmode"] = "disable" } return &Connector{opts: o, dialer: defaultDialer{}}, nil } dependencies/pkg/mod/github.com/lib/pq@v1.10.9/krb.go0000644000000000000000000000145715024302467021043 0ustar rootrootpackage pq // NewGSSFunc creates a GSS authentication provider, for use with // RegisterGSSProvider. type NewGSSFunc func() (GSS, error) var newGss NewGSSFunc // RegisterGSSProvider registers a GSS authentication provider. For example, if // you need to use Kerberos to authenticate with your server, add this to your // main package: // // import "github.com/lib/pq/auth/kerberos" // // func init() { // pq.RegisterGSSProvider(func() (pq.GSS, error) { return kerberos.NewGSS() }) // } func RegisterGSSProvider(newGssArg NewGSSFunc) { newGss = newGssArg } // GSS provides GSSAPI authentication (e.g., Kerberos). type GSS interface { GetInitToken(host string, service string) ([]byte, error) GetInitTokenFromSpn(spn string) ([]byte, error) Continue(inToken []byte) (done bool, outToken []byte, err error) } dependencies/pkg/mod/github.com/jmoiron/0000775000000000000000000000000015024302467017263 5ustar rootrootdependencies/pkg/mod/github.com/jmoiron/sqlx@v1.4.0/0000755000000000000000000000000015024302467021117 5ustar rootrootdependencies/pkg/mod/github.com/jmoiron/sqlx@v1.4.0/sqlx_test.go0000644000000000000000000013667215024302467023513 0ustar rootroot// The following environment variables, if set, will be used: // // - SQLX_SQLITE_DSN // - SQLX_POSTGRES_DSN // - SQLX_MYSQL_DSN // // Set any of these variables to 'skip' to skip them. Note that for MySQL, // the string '?parseTime=True' will be appended to the DSN if it's not there // already. 
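//
// For example (these values are only illustrative, not defaults used by the
// test suite):
//
//	SQLX_POSTGRES_DSN="user=foo dbname=sqlxtest sslmode=disable"
//	SQLX_MYSQL_DSN="user:password@/sqlxtest"
//	SQLX_SQLITE_DSN=":memory:"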
package sqlx import ( "database/sql" "database/sql/driver" "encoding/json" "fmt" "log" "os" "reflect" "strings" "testing" "time" _ "github.com/go-sql-driver/mysql" _ "github.com/lib/pq" _ "github.com/mattn/go-sqlite3" "github.com/jmoiron/sqlx/reflectx" ) /* compile time checks that Db, Tx, Stmt (qStmt) implement expected interfaces */ var _, _ Ext = &DB{}, &Tx{} var _, _ ColScanner = &Row{}, &Rows{} var _ Queryer = &qStmt{} var _ Execer = &qStmt{} var TestPostgres = true var TestSqlite = true var TestMysql = true var sldb *DB var pgdb *DB var mysqldb *DB func init() { ConnectAll() } func ConnectAll() { var err error pgdsn := os.Getenv("SQLX_POSTGRES_DSN") mydsn := os.Getenv("SQLX_MYSQL_DSN") sqdsn := os.Getenv("SQLX_SQLITE_DSN") TestPostgres = pgdsn != "skip" TestMysql = mydsn != "skip" TestSqlite = sqdsn != "skip" if !strings.Contains(mydsn, "parseTime=true") { mydsn += "?parseTime=true" } if TestPostgres { pgdb, err = Connect("postgres", pgdsn) if err != nil { fmt.Printf("Disabling PG tests:\n %v\n", err) TestPostgres = false } } else { fmt.Println("Disabling Postgres tests.") } if TestMysql { mysqldb, err = Connect("mysql", mydsn) if err != nil { fmt.Printf("Disabling MySQL tests:\n %v", err) TestMysql = false } } else { fmt.Println("Disabling MySQL tests.") } if TestSqlite { sldb, err = Connect("sqlite3", sqdsn) if err != nil { fmt.Printf("Disabling SQLite:\n %v", err) TestSqlite = false } } else { fmt.Println("Disabling SQLite tests.") } } type Schema struct { create string drop string } func (s Schema) Postgres() (string, string, string) { return s.create, s.drop, `now()` } func (s Schema) MySQL() (string, string, string) { return strings.Replace(s.create, `"`, "`", -1), s.drop, `now()` } func (s Schema) Sqlite3() (string, string, string) { return strings.Replace(s.create, `now()`, `CURRENT_TIMESTAMP`, -1), s.drop, `CURRENT_TIMESTAMP` } var defaultSchema = Schema{ create: ` CREATE TABLE person ( first_name text, last_name text, email text, added_at timestamp default now() ); CREATE TABLE place ( country text, city text NULL, telcode integer ); CREATE TABLE capplace ( "COUNTRY" text, "CITY" text NULL, "TELCODE" integer ); CREATE TABLE nullperson ( first_name text NULL, last_name text NULL, email text NULL ); CREATE TABLE employees ( name text, id integer, boss_id integer ); `, drop: ` drop table person; drop table place; drop table capplace; drop table nullperson; drop table employees; `, } type Person struct { FirstName string `db:"first_name"` LastName string `db:"last_name"` Email string AddedAt time.Time `db:"added_at"` } type Person2 struct { FirstName sql.NullString `db:"first_name"` LastName sql.NullString `db:"last_name"` Email sql.NullString } type Place struct { Country string City sql.NullString TelCode int } type PlacePtr struct { Country string City *string TelCode int } type PersonPlace struct { Person Place } type PersonPlacePtr struct { *Person *Place } type EmbedConflict struct { FirstName string `db:"first_name"` Person } type SliceMember struct { Country string City sql.NullString TelCode int People []Person `db:"-"` Addresses []Place `db:"-"` } // Note that because of field map caching, we need a new type here // if we've used Place already somewhere in sqlx type CPlace Place func MultiExec(e Execer, query string) { stmts := strings.Split(query, ";\n") if len(strings.Trim(stmts[len(stmts)-1], " \n\t\r")) == 0 { stmts = stmts[:len(stmts)-1] } for _, s := range stmts { _, err := e.Exec(s) if err != nil { fmt.Println(err, s) } } } func RunWithSchema(schema Schema, t 
*testing.T, test func(db *DB, t *testing.T, now string)) { runner := func(db *DB, t *testing.T, create, drop, now string) { defer func() { MultiExec(db, drop) }() MultiExec(db, create) test(db, t, now) } if TestPostgres { create, drop, now := schema.Postgres() runner(pgdb, t, create, drop, now) } if TestSqlite { create, drop, now := schema.Sqlite3() runner(sldb, t, create, drop, now) } if TestMysql { create, drop, now := schema.MySQL() runner(mysqldb, t, create, drop, now) } } func loadDefaultFixture(db *DB, t *testing.T) { tx := db.MustBegin() tx.MustExec(tx.Rebind("INSERT INTO person (first_name, last_name, email) VALUES (?, ?, ?)"), "Jason", "Moiron", "jmoiron@jmoiron.net") tx.MustExec(tx.Rebind("INSERT INTO person (first_name, last_name, email) VALUES (?, ?, ?)"), "John", "Doe", "johndoeDNE@gmail.net") tx.MustExec(tx.Rebind("INSERT INTO place (country, city, telcode) VALUES (?, ?, ?)"), "United States", "New York", "1") tx.MustExec(tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Hong Kong", "852") tx.MustExec(tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Singapore", "65") if db.DriverName() == "mysql" { tx.MustExec(tx.Rebind("INSERT INTO capplace (`COUNTRY`, `TELCODE`) VALUES (?, ?)"), "Sarf Efrica", "27") } else { tx.MustExec(tx.Rebind("INSERT INTO capplace (\"COUNTRY\", \"TELCODE\") VALUES (?, ?)"), "Sarf Efrica", "27") } tx.MustExec(tx.Rebind("INSERT INTO employees (name, id) VALUES (?, ?)"), "Peter", "4444") tx.MustExec(tx.Rebind("INSERT INTO employees (name, id, boss_id) VALUES (?, ?, ?)"), "Joe", "1", "4444") tx.MustExec(tx.Rebind("INSERT INTO employees (name, id, boss_id) VALUES (?, ?, ?)"), "Martin", "2", "4444") tx.Commit() } // Test a new backwards compatible feature, that missing scan destinations // will silently scan into sql.RawText rather than failing/panicing func TestMissingNames(t *testing.T) { RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T, now string) { loadDefaultFixture(db, t) type PersonPlus struct { FirstName string `db:"first_name"` LastName string `db:"last_name"` Email string // AddedAt time.Time `db:"added_at"` } // test Select first pps := []PersonPlus{} // pps lacks added_at destination err := db.Select(&pps, "SELECT * FROM person") if err == nil { t.Error("Expected missing name from Select to fail, but it did not.") } // test Get pp := PersonPlus{} err = db.Get(&pp, "SELECT * FROM person LIMIT 1") if err == nil { t.Error("Expected missing name Get to fail, but it did not.") } // test naked StructScan pps = []PersonPlus{} rows, err := db.Query("SELECT * FROM person LIMIT 1") if err != nil { t.Fatal(err) } rows.Next() err = StructScan(rows, &pps) if err == nil { t.Error("Expected missing name in StructScan to fail, but it did not.") } rows.Close() // now try various things with unsafe set. 
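// Unsafe() returns a copy of the handle on which Get, Select and StructScan
// silently ignore result columns that have no matching struct field, so the
// same queries that failed above are expected to succeed from here on.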
db = db.Unsafe() pps = []PersonPlus{} err = db.Select(&pps, "SELECT * FROM person") if err != nil { t.Error(err) } // test Get pp = PersonPlus{} err = db.Get(&pp, "SELECT * FROM person LIMIT 1") if err != nil { t.Error(err) } // test naked StructScan pps = []PersonPlus{} rowsx, err := db.Queryx("SELECT * FROM person LIMIT 1") if err != nil { t.Fatal(err) } rowsx.Next() err = StructScan(rowsx, &pps) if err != nil { t.Error(err) } rowsx.Close() // test Named stmt if !isUnsafe(db) { t.Error("Expected db to be unsafe, but it isn't") } nstmt, err := db.PrepareNamed(`SELECT * FROM person WHERE first_name != :name`) if err != nil { t.Fatal(err) } // its internal stmt should be marked unsafe if !nstmt.Stmt.unsafe { t.Error("expected NamedStmt to be unsafe but its underlying stmt did not inherit safety") } pps = []PersonPlus{} err = nstmt.Select(&pps, map[string]interface{}{"name": "Jason"}) if err != nil { t.Fatal(err) } if len(pps) != 1 { t.Errorf("Expected 1 person back, got %d", len(pps)) } // test it with a safe db db.unsafe = false if isUnsafe(db) { t.Error("expected db to be safe but it isn't") } nstmt, err = db.PrepareNamed(`SELECT * FROM person WHERE first_name != :name`) if err != nil { t.Fatal(err) } // it should be safe if isUnsafe(nstmt) { t.Error("NamedStmt did not inherit safety") } nstmt.Unsafe() if !isUnsafe(nstmt) { t.Error("expected newly unsafed NamedStmt to be unsafe") } pps = []PersonPlus{} err = nstmt.Select(&pps, map[string]interface{}{"name": "Jason"}) if err != nil { t.Fatal(err) } if len(pps) != 1 { t.Errorf("Expected 1 person back, got %d", len(pps)) } }) } func TestEmbeddedStructs(t *testing.T) { type Loop1 struct{ Person } type Loop2 struct{ Loop1 } type Loop3 struct{ Loop2 } RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T, now string) { loadDefaultFixture(db, t) peopleAndPlaces := []PersonPlace{} err := db.Select( &peopleAndPlaces, `SELECT person.*, place.* FROM person natural join place`) if err != nil { t.Fatal(err) } for _, pp := range peopleAndPlaces { if len(pp.Person.FirstName) == 0 { t.Errorf("Expected non zero lengthed first name.") } if len(pp.Place.Country) == 0 { t.Errorf("Expected non zero lengthed country.") } } // test embedded structs with StructScan rows, err := db.Queryx( `SELECT person.*, place.* FROM person natural join place`) if err != nil { t.Error(err) } perp := PersonPlace{} rows.Next() err = rows.StructScan(&perp) if err != nil { t.Error(err) } if len(perp.Person.FirstName) == 0 { t.Errorf("Expected non zero lengthed first name.") } if len(perp.Place.Country) == 0 { t.Errorf("Expected non zero lengthed country.") } rows.Close() // test the same for embedded pointer structs peopleAndPlacesPtrs := []PersonPlacePtr{} err = db.Select( &peopleAndPlacesPtrs, `SELECT person.*, place.* FROM person natural join place`) if err != nil { t.Fatal(err) } for _, pp := range peopleAndPlacesPtrs { if len(pp.Person.FirstName) == 0 { t.Errorf("Expected non zero lengthed first name.") } if len(pp.Place.Country) == 0 { t.Errorf("Expected non zero lengthed country.") } } // test "deep nesting" l3s := []Loop3{} err = db.Select(&l3s, `select * from person`) if err != nil { t.Fatal(err) } for _, l3 := range l3s { if len(l3.Loop2.Loop1.Person.FirstName) == 0 { t.Errorf("Expected non zero lengthed first name.") } } // test "embed conflicts" ec := []EmbedConflict{} err = db.Select(&ec, `select * from person`) // I'm torn between erroring here or having some kind of working behavior // in order to allow for more flexibility in destination structs if err != nil { 
t.Errorf("Was not expecting an error on embed conflicts.") } }) } func TestJoinQuery(t *testing.T) { type Employee struct { Name string ID int64 // BossID is an id into the employee table BossID sql.NullInt64 `db:"boss_id"` } type Boss Employee RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T, now string) { loadDefaultFixture(db, t) var employees []struct { Employee Boss `db:"boss"` } err := db.Select( &employees, `SELECT employees.*, boss.id "boss.id", boss.name "boss.name" FROM employees JOIN employees AS boss ON employees.boss_id = boss.id`) if err != nil { t.Fatal(err) } for _, em := range employees { if len(em.Employee.Name) == 0 { t.Errorf("Expected non zero lengthed name.") } if em.Employee.BossID.Int64 != em.Boss.ID { t.Errorf("Expected boss ids to match") } } }) } func TestJoinQueryNamedPointerStructs(t *testing.T) { type Employee struct { Name string ID int64 // BossID is an id into the employee table BossID sql.NullInt64 `db:"boss_id"` } type Boss Employee RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T, now string) { loadDefaultFixture(db, t) var employees []struct { Emp1 *Employee `db:"emp1"` Emp2 *Employee `db:"emp2"` *Boss `db:"boss"` } err := db.Select( &employees, `SELECT emp.name "emp1.name", emp.id "emp1.id", emp.boss_id "emp1.boss_id", emp.name "emp2.name", emp.id "emp2.id", emp.boss_id "emp2.boss_id", boss.id "boss.id", boss.name "boss.name" FROM employees AS emp JOIN employees AS boss ON emp.boss_id = boss.id `) if err != nil { t.Fatal(err) } for _, em := range employees { if len(em.Emp1.Name) == 0 || len(em.Emp2.Name) == 0 { t.Errorf("Expected non zero lengthed name.") } if em.Emp1.BossID.Int64 != em.Boss.ID || em.Emp2.BossID.Int64 != em.Boss.ID { t.Errorf("Expected boss ids to match") } } }) } func TestSelectSliceMapTime(t *testing.T) { RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T, now string) { loadDefaultFixture(db, t) rows, err := db.Queryx("SELECT * FROM person") if err != nil { t.Fatal(err) } for rows.Next() { _, err := rows.SliceScan() if err != nil { t.Error(err) } } rows, err = db.Queryx("SELECT * FROM person") if err != nil { t.Fatal(err) } for rows.Next() { m := map[string]interface{}{} err := rows.MapScan(m) if err != nil { t.Error(err) } } }) } func TestNilReceiver(t *testing.T) { RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T, now string) { loadDefaultFixture(db, t) var p *Person err := db.Get(p, "SELECT * FROM person LIMIT 1") if err == nil { t.Error("Expected error when getting into nil struct ptr.") } var pp *[]Person err = db.Select(pp, "SELECT * FROM person") if err == nil { t.Error("Expected an error when selecting into nil slice ptr.") } }) } func TestNamedQuery(t *testing.T) { var schema = Schema{ create: ` CREATE TABLE place ( id integer PRIMARY KEY, name text NULL ); CREATE TABLE person ( first_name text NULL, last_name text NULL, email text NULL ); CREATE TABLE placeperson ( first_name text NULL, last_name text NULL, email text NULL, place_id integer NULL ); CREATE TABLE jsperson ( "FIRST" text NULL, last_name text NULL, "EMAIL" text NULL );`, drop: ` drop table person; drop table jsperson; drop table place; drop table placeperson; `, } RunWithSchema(schema, t, func(db *DB, t *testing.T, now string) { type Person struct { FirstName sql.NullString `db:"first_name"` LastName sql.NullString `db:"last_name"` Email sql.NullString } p := Person{ FirstName: sql.NullString{String: "ben", Valid: true}, LastName: sql.NullString{String: "doe", Valid: true}, Email: sql.NullString{String: "ben@doe.com", Valid: 
true}, } q1 := `INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)` _, err := db.NamedExec(q1, p) if err != nil { log.Fatal(err) } p2 := &Person{} rows, err := db.NamedQuery("SELECT * FROM person WHERE first_name=:first_name", p) if err != nil { log.Fatal(err) } for rows.Next() { err = rows.StructScan(p2) if err != nil { t.Error(err) } if p2.FirstName.String != "ben" { t.Error("Expected first name of `ben`, got " + p2.FirstName.String) } if p2.LastName.String != "doe" { t.Error("Expected first name of `doe`, got " + p2.LastName.String) } } // these are tests for #73; they verify that named queries work if you've // changed the db mapper. This code checks both NamedQuery "ad-hoc" style // queries and NamedStmt queries, which use different code paths internally. old := (*db).Mapper type JSONPerson struct { FirstName sql.NullString `json:"FIRST"` LastName sql.NullString `json:"last_name"` Email sql.NullString } jp := JSONPerson{ FirstName: sql.NullString{String: "ben", Valid: true}, LastName: sql.NullString{String: "smith", Valid: true}, Email: sql.NullString{String: "ben@smith.com", Valid: true}, } db.Mapper = reflectx.NewMapperFunc("json", strings.ToUpper) // prepare queries for case sensitivity to test our ToUpper function. // postgres and sqlite accept "", but mysql uses ``; since Go's multi-line // strings are `` we use "" by default and swap out for MySQL pdb := func(s string, db *DB) string { if db.DriverName() == "mysql" { return strings.Replace(s, `"`, "`", -1) } return s } q1 = `INSERT INTO jsperson ("FIRST", last_name, "EMAIL") VALUES (:FIRST, :last_name, :EMAIL)` _, err = db.NamedExec(pdb(q1, db), jp) if err != nil { t.Fatal(err, db.DriverName()) } // Checks that a person pulled out of the db matches the one we put in check := func(t *testing.T, rows *Rows) { jp = JSONPerson{} for rows.Next() { err = rows.StructScan(&jp) if err != nil { t.Error(err) } if jp.FirstName.String != "ben" { t.Errorf("Expected first name of `ben`, got `%s` (%s) ", jp.FirstName.String, db.DriverName()) } if jp.LastName.String != "smith" { t.Errorf("Expected LastName of `smith`, got `%s` (%s)", jp.LastName.String, db.DriverName()) } if jp.Email.String != "ben@smith.com" { t.Errorf("Expected first name of `doe`, got `%s` (%s)", jp.Email.String, db.DriverName()) } } } ns, err := db.PrepareNamed(pdb(` SELECT * FROM jsperson WHERE "FIRST"=:FIRST AND last_name=:last_name AND "EMAIL"=:EMAIL `, db)) if err != nil { t.Fatal(err) } rows, err = ns.Queryx(jp) if err != nil { t.Fatal(err) } check(t, rows) // Check exactly the same thing, but with db.NamedQuery, which does not go // through the PrepareNamed/NamedStmt path. 
rows, err = db.NamedQuery(pdb(` SELECT * FROM jsperson WHERE "FIRST"=:FIRST AND last_name=:last_name AND "EMAIL"=:EMAIL `, db), jp) if err != nil { t.Fatal(err) } check(t, rows) db.Mapper = old // Test nested structs type Place struct { ID int `db:"id"` Name sql.NullString `db:"name"` } type PlacePerson struct { FirstName sql.NullString `db:"first_name"` LastName sql.NullString `db:"last_name"` Email sql.NullString Place Place `db:"place"` } pl := Place{ Name: sql.NullString{String: "myplace", Valid: true}, } pp := PlacePerson{ FirstName: sql.NullString{String: "ben", Valid: true}, LastName: sql.NullString{String: "doe", Valid: true}, Email: sql.NullString{String: "ben@doe.com", Valid: true}, } q2 := `INSERT INTO place (id, name) VALUES (1, :name)` _, err = db.NamedExec(q2, pl) if err != nil { log.Fatal(err) } id := 1 pp.Place.ID = id q3 := `INSERT INTO placeperson (first_name, last_name, email, place_id) VALUES (:first_name, :last_name, :email, :place.id)` _, err = db.NamedExec(q3, pp) if err != nil { log.Fatal(err) } pp2 := &PlacePerson{} rows, err = db.NamedQuery(` SELECT first_name, last_name, email, place.id AS "place.id", place.name AS "place.name" FROM placeperson INNER JOIN place ON place.id = placeperson.place_id WHERE place.id=:place.id`, pp) if err != nil { log.Fatal(err) } for rows.Next() { err = rows.StructScan(pp2) if err != nil { t.Error(err) } if pp2.FirstName.String != "ben" { t.Error("Expected first name of `ben`, got " + pp2.FirstName.String) } if pp2.LastName.String != "doe" { t.Error("Expected first name of `doe`, got " + pp2.LastName.String) } if pp2.Place.Name.String != "myplace" { t.Error("Expected place name of `myplace`, got " + pp2.Place.Name.String) } if pp2.Place.ID != pp.Place.ID { t.Errorf("Expected place name of %v, got %v", pp.Place.ID, pp2.Place.ID) } } }) } func TestNilInserts(t *testing.T) { var schema = Schema{ create: ` CREATE TABLE tt ( id integer, value text NULL DEFAULT NULL );`, drop: "drop table tt;", } RunWithSchema(schema, t, func(db *DB, t *testing.T, now string) { type TT struct { ID int Value *string } var v, v2 TT r := db.Rebind db.MustExec(r(`INSERT INTO tt (id) VALUES (1)`)) db.Get(&v, r(`SELECT * FROM tt`)) if v.ID != 1 { t.Errorf("Expecting id of 1, got %v", v.ID) } if v.Value != nil { t.Errorf("Expecting NULL to map to nil, got %s", *v.Value) } v.ID = 2 // NOTE: this incidentally uncovered a bug which was that named queries with // pointer destinations would not work if the passed value here was not addressable, // as reflectx.FieldByIndexes attempts to allocate nil pointer receivers for // writing. This was fixed by creating & using the reflectx.FieldByIndexesReadOnly // function. This next line is important as it provides the only coverage for this. 
db.NamedExec(`INSERT INTO tt (id, value) VALUES (:id, :value)`, v) db.Get(&v2, r(`SELECT * FROM tt WHERE id=2`)) if v.ID != v2.ID { t.Errorf("%v != %v", v.ID, v2.ID) } if v2.Value != nil { t.Errorf("Expecting NULL to map to nil, got %s", *v.Value) } }) } func TestScanError(t *testing.T) { var schema = Schema{ create: ` CREATE TABLE kv ( k text, v integer );`, drop: `drop table kv;`, } RunWithSchema(schema, t, func(db *DB, t *testing.T, now string) { type WrongTypes struct { K int V string } _, err := db.Exec(db.Rebind("INSERT INTO kv (k, v) VALUES (?, ?)"), "hi", 1) if err != nil { t.Error(err) } rows, err := db.Queryx("SELECT * FROM kv") if err != nil { t.Error(err) } for rows.Next() { var wt WrongTypes err := rows.StructScan(&wt) if err == nil { t.Errorf("%s: Scanning wrong types into keys should have errored.", db.DriverName()) } } }) } func TestMultiInsert(t *testing.T) { RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T, now string) { loadDefaultFixture(db, t) q := db.Rebind(`INSERT INTO employees (name, id) VALUES (?, ?), (?, ?);`) db.MustExec(q, "Name1", 400, "name2", 500, ) }) } // FIXME: this function is kinda big but it slows things down to be constantly // loading and reloading the schema.. func TestUsage(t *testing.T) { RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T, now string) { loadDefaultFixture(db, t) slicemembers := []SliceMember{} err := db.Select(&slicemembers, "SELECT * FROM place ORDER BY telcode ASC") if err != nil { t.Fatal(err) } people := []Person{} err = db.Select(&people, "SELECT * FROM person ORDER BY first_name ASC") if err != nil { t.Fatal(err) } jason, john := people[0], people[1] if jason.FirstName != "Jason" { t.Errorf("Expecting FirstName of Jason, got %s", jason.FirstName) } if jason.LastName != "Moiron" { t.Errorf("Expecting LastName of Moiron, got %s", jason.LastName) } if jason.Email != "jmoiron@jmoiron.net" { t.Errorf("Expecting Email of jmoiron@jmoiron.net, got %s", jason.Email) } if john.FirstName != "John" || john.LastName != "Doe" || john.Email != "johndoeDNE@gmail.net" { t.Errorf("John Doe's person record not what expected: Got %v\n", john) } jason = Person{} err = db.Get(&jason, db.Rebind("SELECT * FROM person WHERE first_name=?"), "Jason") if err != nil { t.Fatal(err) } if jason.FirstName != "Jason" { t.Errorf("Expecting to get back Jason, but got %v\n", jason.FirstName) } err = db.Get(&jason, db.Rebind("SELECT * FROM person WHERE first_name=?"), "Foobar") if err == nil { t.Errorf("Expecting an error, got nil\n") } if err != sql.ErrNoRows { t.Errorf("Expected sql.ErrNoRows, got %v\n", err) } // The following tests check statement reuse, which was actually a problem // due to copying being done when creating Stmt's which was eventually removed stmt1, err := db.Preparex(db.Rebind("SELECT * FROM person WHERE first_name=?")) if err != nil { t.Fatal(err) } jason = Person{} row := stmt1.QueryRowx("DoesNotExist") row.Scan(&jason) row = stmt1.QueryRowx("DoesNotExist") row.Scan(&jason) err = stmt1.Get(&jason, "DoesNotExist User") if err == nil { t.Error("Expected an error") } err = stmt1.Get(&jason, "DoesNotExist User 2") if err == nil { t.Fatal(err) } stmt2, err := db.Preparex(db.Rebind("SELECT * FROM person WHERE first_name=?")) if err != nil { t.Fatal(err) } jason = Person{} tx, err := db.Beginx() if err != nil { t.Fatal(err) } tstmt2 := tx.Stmtx(stmt2) row2 := tstmt2.QueryRowx("Jason") err = row2.StructScan(&jason) if err != nil { t.Error(err) } tx.Commit() places := []*Place{} err = db.Select(&places, "SELECT telcode FROM place 
ORDER BY telcode ASC") if err != nil { t.Fatal(err) } usa, singsing, honkers := places[0], places[1], places[2] if usa.TelCode != 1 || honkers.TelCode != 852 || singsing.TelCode != 65 { t.Errorf("Expected integer telcodes to work, got %#v", places) } placesptr := []PlacePtr{} err = db.Select(&placesptr, "SELECT * FROM place ORDER BY telcode ASC") if err != nil { t.Error(err) } // fmt.Printf("%#v\n%#v\n%#v\n", placesptr[0], placesptr[1], placesptr[2]) // if you have null fields and use SELECT *, you must use sql.Null* in your struct // this test also verifies that you can use either a []Struct{} or a []*Struct{} places2 := []Place{} err = db.Select(&places2, "SELECT * FROM place ORDER BY telcode ASC") if err != nil { t.Fatal(err) } usa, singsing, honkers = &places2[0], &places2[1], &places2[2] // this should return a type error that &p is not a pointer to a struct slice p := Place{} err = db.Select(&p, "SELECT * FROM place ORDER BY telcode ASC") if err == nil { t.Errorf("Expected an error, argument to select should be a pointer to a struct slice") } // this should be an error pl := []Place{} err = db.Select(pl, "SELECT * FROM place ORDER BY telcode ASC") if err == nil { t.Errorf("Expected an error, argument to select should be a pointer to a struct slice, not a slice.") } if usa.TelCode != 1 || honkers.TelCode != 852 || singsing.TelCode != 65 { t.Errorf("Expected integer telcodes to work, got %#v", places) } stmt, err := db.Preparex(db.Rebind("SELECT country, telcode FROM place WHERE telcode > ? ORDER BY telcode ASC")) if err != nil { t.Error(err) } places = []*Place{} err = stmt.Select(&places, 10) if len(places) != 2 { t.Error("Expected 2 places, got 0.") } if err != nil { t.Fatal(err) } singsing, honkers = places[0], places[1] if singsing.TelCode != 65 || honkers.TelCode != 852 { t.Errorf("Expected the right telcodes, got %#v", places) } rows, err := db.Queryx("SELECT * FROM place") if err != nil { t.Fatal(err) } place := Place{} for rows.Next() { err = rows.StructScan(&place) if err != nil { t.Fatal(err) } } rows, err = db.Queryx("SELECT * FROM place") if err != nil { t.Fatal(err) } m := map[string]interface{}{} for rows.Next() { err = rows.MapScan(m) if err != nil { t.Fatal(err) } _, ok := m["country"] if !ok { t.Errorf("Expected key `country` in map but could not find it (%#v)\n", m) } } rows, err = db.Queryx("SELECT * FROM place") if err != nil { t.Fatal(err) } for rows.Next() { s, err := rows.SliceScan() if err != nil { t.Error(err) } if len(s) != 3 { t.Errorf("Expected 3 columns in result, got %d\n", len(s)) } } // test advanced querying // test that NamedExec works with a map as well as a struct _, err = db.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first, :last, :email)", map[string]interface{}{ "first": "Bin", "last": "Smuth", "email": "bensmith@allblacks.nz", }) if err != nil { t.Fatal(err) } // ensure that if the named param happens right at the end it still works // ensure that NamedQuery works with a map[string]interface{} rows, err = db.NamedQuery("SELECT * FROM person WHERE first_name=:first", map[string]interface{}{"first": "Bin"}) if err != nil { t.Fatal(err) } ben := &Person{} for rows.Next() { err = rows.StructScan(ben) if err != nil { t.Fatal(err) } if ben.FirstName != "Bin" { t.Fatal("Expected first name of `Bin`, got " + ben.FirstName) } if ben.LastName != "Smuth" { t.Fatal("Expected first name of `Smuth`, got " + ben.LastName) } } ben.FirstName = "Ben" ben.LastName = "Smith" ben.Email = "binsmuth@allblacks.nz" // Insert via a named query 
using the struct _, err = db.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", ben) if err != nil { t.Fatal(err) } rows, err = db.NamedQuery("SELECT * FROM person WHERE first_name=:first_name", ben) if err != nil { t.Fatal(err) } for rows.Next() { err = rows.StructScan(ben) if err != nil { t.Fatal(err) } if ben.FirstName != "Ben" { t.Fatal("Expected first name of `Ben`, got " + ben.FirstName) } if ben.LastName != "Smith" { t.Fatal("Expected first name of `Smith`, got " + ben.LastName) } } // ensure that Get does not panic on emppty result set person := &Person{} err = db.Get(person, "SELECT * FROM person WHERE first_name=$1", "does-not-exist") if err == nil { t.Fatal("Should have got an error for Get on non-existent row.") } // lets test prepared statements some more stmt, err = db.Preparex(db.Rebind("SELECT * FROM person WHERE first_name=?")) if err != nil { t.Fatal(err) } rows, err = stmt.Queryx("Ben") if err != nil { t.Fatal(err) } for rows.Next() { err = rows.StructScan(ben) if err != nil { t.Fatal(err) } if ben.FirstName != "Ben" { t.Fatal("Expected first name of `Ben`, got " + ben.FirstName) } if ben.LastName != "Smith" { t.Fatal("Expected first name of `Smith`, got " + ben.LastName) } } john = Person{} stmt, err = db.Preparex(db.Rebind("SELECT * FROM person WHERE first_name=?")) if err != nil { t.Error(err) } err = stmt.Get(&john, "John") if err != nil { t.Error(err) } // test name mapping // THIS USED TO WORK BUT WILL NO LONGER WORK. db.MapperFunc(strings.ToUpper) rsa := CPlace{} err = db.Get(&rsa, "SELECT * FROM capplace;") if err != nil { t.Error(err, "in db:", db.DriverName()) } db.MapperFunc(strings.ToLower) // create a copy and change the mapper, then verify the copy behaves // differently from the original. dbCopy := NewDb(db.DB, db.DriverName()) dbCopy.MapperFunc(strings.ToUpper) err = dbCopy.Get(&rsa, "SELECT * FROM capplace;") if err != nil { fmt.Println(db.DriverName()) t.Error(err) } err = db.Get(&rsa, "SELECT * FROM cappplace;") if err == nil { t.Error("Expected no error, got ", err) } // test base type slices var sdest []string rows, err = db.Queryx("SELECT email FROM person ORDER BY email ASC;") if err != nil { t.Error(err) } err = scanAll(rows, &sdest, false) if err != nil { t.Error(err) } // test Get with base types var count int err = db.Get(&count, "SELECT count(*) FROM person;") if err != nil { t.Error(err) } if count != len(sdest) { t.Errorf("Expected %d == %d (count(*) vs len(SELECT ..)", count, len(sdest)) } // test Get and Select with time.Time, #84 var addedAt time.Time err = db.Get(&addedAt, "SELECT added_at FROM person LIMIT 1;") if err != nil { t.Error(err) } var addedAts []time.Time err = db.Select(&addedAts, "SELECT added_at FROM person;") if err != nil { t.Error(err) } // test it on a double pointer var pcount *int err = db.Get(&pcount, "SELECT count(*) FROM person;") if err != nil { t.Error(err) } if *pcount != count { t.Errorf("expected %d = %d", *pcount, count) } // test Select... 
sdest = []string{} err = db.Select(&sdest, "SELECT first_name FROM person ORDER BY first_name ASC;") if err != nil { t.Error(err) } expected := []string{"Ben", "Bin", "Jason", "John"} for i, got := range sdest { if got != expected[i] { t.Errorf("Expected %d result to be %s, but got %s", i, expected[i], got) } } var nsdest []sql.NullString err = db.Select(&nsdest, "SELECT city FROM place ORDER BY city ASC") if err != nil { t.Error(err) } for _, val := range nsdest { if val.Valid && val.String != "New York" { t.Errorf("expected single valid result to be `New York`, but got %s", val.String) } } }) } type Product struct { ProductID int } // tests that sqlx will not panic when the wrong driver is passed because // of an automatic nil dereference in sqlx.Open(), which was fixed. func TestDoNotPanicOnConnect(t *testing.T) { db, err := Connect("bogus", "hehe") if err == nil { t.Errorf("Should return error when using bogus driverName") } if db != nil { t.Errorf("Should not return the db on a connect failure") } } func TestRebind(t *testing.T) { q1 := `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)` q2 := `INSERT INTO foo (a, b, c) VALUES (?, ?, "foo"), ("Hi", ?, ?)` s1 := Rebind(DOLLAR, q1) s2 := Rebind(DOLLAR, q2) if s1 != `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)` { t.Errorf("q1 failed") } if s2 != `INSERT INTO foo (a, b, c) VALUES ($1, $2, "foo"), ("Hi", $3, $4)` { t.Errorf("q2 failed") } s1 = Rebind(AT, q1) s2 = Rebind(AT, q2) if s1 != `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES (@p1, @p2, @p3, @p4, @p5, @p6, @p7, @p8, @p9, @p10)` { t.Errorf("q1 failed") } if s2 != `INSERT INTO foo (a, b, c) VALUES (@p1, @p2, "foo"), ("Hi", @p3, @p4)` { t.Errorf("q2 failed") } s1 = Rebind(NAMED, q1) s2 = Rebind(NAMED, q2) ex1 := `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES ` + `(:arg1, :arg2, :arg3, :arg4, :arg5, :arg6, :arg7, :arg8, :arg9, :arg10)` if s1 != ex1 { t.Error("q1 failed on Named params") } ex2 := `INSERT INTO foo (a, b, c) VALUES (:arg1, :arg2, "foo"), ("Hi", :arg3, :arg4)` if s2 != ex2 { t.Error("q2 failed on Named params") } } func TestBindMap(t *testing.T) { // Test that it works.. 
q1 := `INSERT INTO foo (a, b, c, d) VALUES (:name, :age, :first, :last)` am := map[string]interface{}{ "name": "Jason Moiron", "age": 30, "first": "Jason", "last": "Moiron", } bq, args, _ := bindMap(QUESTION, q1, am) expect := `INSERT INTO foo (a, b, c, d) VALUES (?, ?, ?, ?)` if bq != expect { t.Errorf("Interpolation of query failed: got `%v`, expected `%v`\n", bq, expect) } if args[0].(string) != "Jason Moiron" { t.Errorf("Expected `Jason Moiron`, got %v\n", args[0]) } if args[1].(int) != 30 { t.Errorf("Expected 30, got %v\n", args[1]) } if args[2].(string) != "Jason" { t.Errorf("Expected Jason, got %v\n", args[2]) } if args[3].(string) != "Moiron" { t.Errorf("Expected Moiron, got %v\n", args[3]) } } // Test for #117, embedded nil maps type Message struct { Text string `db:"string"` Properties PropertyMap `db:"properties"` // Stored as JSON in the database } type PropertyMap map[string]string // Implement driver.Valuer and sql.Scanner interfaces on PropertyMap func (p PropertyMap) Value() (driver.Value, error) { if len(p) == 0 { return nil, nil } return json.Marshal(p) } func (p PropertyMap) Scan(src interface{}) error { v := reflect.ValueOf(src) if !v.IsValid() || v.CanAddr() && v.IsNil() { return nil } switch ts := src.(type) { case []byte: return json.Unmarshal(ts, &p) case string: return json.Unmarshal([]byte(ts), &p) default: return fmt.Errorf("Could not not decode type %T -> %T", src, p) } } func TestEmbeddedMaps(t *testing.T) { var schema = Schema{ create: ` CREATE TABLE message ( string text, properties text );`, drop: `drop table message;`, } RunWithSchema(schema, t, func(db *DB, t *testing.T, now string) { messages := []Message{ {"Hello, World", PropertyMap{"one": "1", "two": "2"}}, {"Thanks, Joy", PropertyMap{"pull": "request"}}, } q1 := `INSERT INTO message (string, properties) VALUES (:string, :properties);` for _, m := range messages { _, err := db.NamedExec(q1, m) if err != nil { t.Fatal(err) } } var count int err := db.Get(&count, "SELECT count(*) FROM message") if err != nil { t.Fatal(err) } if count != len(messages) { t.Fatalf("Expected %d messages in DB, found %d", len(messages), count) } var m Message err = db.Get(&m, "SELECT * FROM message LIMIT 1;") if err != nil { t.Fatal(err) } if m.Properties == nil { t.Fatal("Expected m.Properties to not be nil, but it was.") } }) } func TestIssue197(t *testing.T) { // this test actually tests for a bug in database/sql: // https://github.com/golang/go/issues/13905 // this potentially makes _any_ named type that is an alias for []byte // unsafe to use in a lot of different ways (basically, unsafe to hold // onto after loading from the database). 
t.Skip() type mybyte []byte type Var struct{ Raw json.RawMessage } type Var2 struct{ Raw []byte } type Var3 struct{ Raw mybyte } RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T, now string) { var err error var v, q Var if err = db.Get(&v, `SELECT '{"a": "b"}' AS raw`); err != nil { t.Fatal(err) } if err = db.Get(&q, `SELECT 'null' AS raw`); err != nil { t.Fatal(err) } var v2, q2 Var2 if err = db.Get(&v2, `SELECT '{"a": "b"}' AS raw`); err != nil { t.Fatal(err) } if err = db.Get(&q2, `SELECT 'null' AS raw`); err != nil { t.Fatal(err) } var v3, q3 Var3 if err = db.QueryRow(`SELECT '{"a": "b"}' AS raw`).Scan(&v3.Raw); err != nil { t.Fatal(err) } if err = db.QueryRow(`SELECT '{"c": "d"}' AS raw`).Scan(&q3.Raw); err != nil { t.Fatal(err) } t.Fail() }) } func TestIn(t *testing.T) { // some quite normal situations type tr struct { q string args []interface{} c int } tests := []tr{ {"SELECT * FROM foo WHERE x = ? AND v in (?) AND y = ?", []interface{}{"foo", []int{0, 5, 7, 2, 9}, "bar"}, 7}, {"SELECT * FROM foo WHERE x in (?)", []interface{}{[]int{1, 2, 3, 4, 5, 6, 7, 8}}, 8}, {"SELECT * FROM foo WHERE x = ? AND y in (?)", []interface{}{[]byte("foo"), []int{0, 5, 3}}, 4}, {"SELECT * FROM foo WHERE x = ? AND y IN (?)", []interface{}{sql.NullString{Valid: false}, []string{"a", "b"}}, 3}, } for _, test := range tests { q, a, err := In(test.q, test.args...) if err != nil { t.Error(err) } if len(a) != test.c { t.Errorf("Expected %d args, but got %d (%+v)", test.c, len(a), a) } if strings.Count(q, "?") != test.c { t.Errorf("Expected %d bindVars, got %d", test.c, strings.Count(q, "?")) } } // too many bindVars, but no slices, so short circuits parsing // i'm not sure if this is the right behavior; this query/arg combo // might not work, but we shouldn't parse if we don't need to { orig := "SELECT * FROM foo WHERE x = ? AND y = ?" q, a, err := In(orig, "foo", "bar", "baz") if err != nil { t.Error(err) } if len(a) != 3 { t.Errorf("Expected 3 args, but got %d (%+v)", len(a), a) } if q != orig { t.Error("Expected unchanged query.") } } tests = []tr{ // too many bindvars; slice present so should return error during parse {"SELECT * FROM foo WHERE x = ? and y = ?", []interface{}{"foo", []int{1, 2, 3}, "bar"}, 0}, // empty slice, should return error before parse {"SELECT * FROM foo WHERE x = ?", []interface{}{[]int{}}, 0}, // too *few* bindvars, should return an error {"SELECT * FROM foo WHERE x = ? AND y in (?)", []interface{}{[]int{1, 2, 3}}, 0}, } for _, test := range tests { _, _, err := In(test.q, test.args...) if err == nil { t.Error("Expected an error, but got nil.") } } RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T, now string) { loadDefaultFixture(db, t) // tx.MustExec(tx.Rebind("INSERT INTO place (country, city, telcode) VALUES (?, ?, ?)"), "United States", "New York", "1") // tx.MustExec(tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Hong Kong", "852") // tx.MustExec(tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Singapore", "65") telcodes := []int{852, 65} q := "SELECT * FROM place WHERE telcode IN(?) ORDER BY telcode" query, args, err := In(q, telcodes) if err != nil { t.Error(err) } query = db.Rebind(query) places := []Place{} err = db.Select(&places, query, args...) 
if err != nil { t.Error(err) } if len(places) != 2 { t.Fatalf("Expecting 2 results, got %d", len(places)) } if places[0].TelCode != 65 { t.Errorf("Expecting singapore first, but got %#v", places[0]) } if places[1].TelCode != 852 { t.Errorf("Expecting hong kong second, but got %#v", places[1]) } }) } func TestBindStruct(t *testing.T) { var err error q1 := `INSERT INTO foo (a, b, c, d) VALUES (:name, :age, :first, :last)` type tt struct { Name string Age int First string Last string } type tt2 struct { Field1 string `db:"field_1"` Field2 string `db:"field_2"` } type tt3 struct { tt2 Name string } am := tt{"Jason Moiron", 30, "Jason", "Moiron"} bq, args, _ := bindStruct(QUESTION, q1, am, mapper()) expect := `INSERT INTO foo (a, b, c, d) VALUES (?, ?, ?, ?)` if bq != expect { t.Errorf("Interpolation of query failed: got `%v`, expected `%v`\n", bq, expect) } if args[0].(string) != "Jason Moiron" { t.Errorf("Expected `Jason Moiron`, got %v\n", args[0]) } if args[1].(int) != 30 { t.Errorf("Expected 30, got %v\n", args[1]) } if args[2].(string) != "Jason" { t.Errorf("Expected Jason, got %v\n", args[2]) } if args[3].(string) != "Moiron" { t.Errorf("Expected Moiron, got %v\n", args[3]) } am2 := tt2{"Hello", "World"} bq, args, _ = bindStruct(QUESTION, "INSERT INTO foo (a, b) VALUES (:field_2, :field_1)", am2, mapper()) expect = `INSERT INTO foo (a, b) VALUES (?, ?)` if bq != expect { t.Errorf("Interpolation of query failed: got `%v`, expected `%v`\n", bq, expect) } if args[0].(string) != "World" { t.Errorf("Expected 'World', got %s\n", args[0].(string)) } if args[1].(string) != "Hello" { t.Errorf("Expected 'Hello', got %s\n", args[1].(string)) } am3 := tt3{Name: "Hello!"} am3.Field1 = "Hello" am3.Field2 = "World" bq, args, err = bindStruct(QUESTION, "INSERT INTO foo (a, b, c) VALUES (:name, :field_1, :field_2)", am3, mapper()) if err != nil { t.Fatal(err) } expect = `INSERT INTO foo (a, b, c) VALUES (?, ?, ?)` if bq != expect { t.Errorf("Interpolation of query failed: got `%v`, expected `%v`\n", bq, expect) } if args[0].(string) != "Hello!" 
{ t.Errorf("Expected 'Hello!', got %s\n", args[0].(string)) } if args[1].(string) != "Hello" { t.Errorf("Expected 'Hello', got %s\n", args[1].(string)) } if args[2].(string) != "World" { t.Errorf("Expected 'World', got %s\n", args[0].(string)) } } func TestEmbeddedLiterals(t *testing.T) { var schema = Schema{ create: ` CREATE TABLE x ( k text );`, drop: `drop table x;`, } RunWithSchema(schema, t, func(db *DB, t *testing.T, now string) { type t1 struct { K *string } type t2 struct { Inline struct { F string } K *string } db.MustExec(db.Rebind("INSERT INTO x (k) VALUES (?), (?), (?);"), "one", "two", "three") target := t1{} err := db.Get(&target, db.Rebind("SELECT * FROM x WHERE k=?"), "one") if err != nil { t.Error(err) } if *target.K != "one" { t.Error("Expected target.K to be `one`, got ", target.K) } target2 := t2{} err = db.Get(&target2, db.Rebind("SELECT * FROM x WHERE k=?"), "one") if err != nil { t.Error(err) } if *target2.K != "one" { t.Errorf("Expected target2.K to be `one`, got `%v`", target2.K) } }) } func BenchmarkBindStruct(b *testing.B) { b.StopTimer() q1 := `INSERT INTO foo (a, b, c, d) VALUES (:name, :age, :first, :last)` type t struct { Name string Age int First string Last string } am := t{"Jason Moiron", 30, "Jason", "Moiron"} b.StartTimer() for i := 0; i < b.N; i++ { bindStruct(DOLLAR, q1, am, mapper()) } } func TestBindNamedMapper(t *testing.T) { type A map[string]interface{} m := reflectx.NewMapperFunc("db", NameMapper) query, args, err := bindNamedMapper(DOLLAR, `select :x`, A{ "x": "X!", }, m) if err != nil { t.Fatal(err) } got := fmt.Sprintf("%s %s", query, args) want := `select $1 [X!]` if got != want { t.Errorf("\ngot: %q\nwant: %q", got, want) } _, _, err = bindNamedMapper(DOLLAR, `select :x`, map[string]string{ "x": "X!", }, m) if err == nil { t.Fatal("err is nil") } if !strings.Contains(err.Error(), "unsupported map type") { t.Errorf("wrong error: %s", err) } } func BenchmarkBindMap(b *testing.B) { b.StopTimer() q1 := `INSERT INTO foo (a, b, c, d) VALUES (:name, :age, :first, :last)` am := map[string]interface{}{ "name": "Jason Moiron", "age": 30, "first": "Jason", "last": "Moiron", } b.StartTimer() for i := 0; i < b.N; i++ { bindMap(DOLLAR, q1, am) } } func BenchmarkIn(b *testing.B) { q := `SELECT * FROM foo WHERE x = ? AND v in (?) AND y = ?` for i := 0; i < b.N; i++ { _, _, _ = In(q, []interface{}{"foo", []int{0, 5, 7, 2, 9}, "bar"}...) } } func BenchmarkIn1k(b *testing.B) { q := `SELECT * FROM foo WHERE x = ? AND v in (?) AND y = ?` var vals [1000]interface{} for i := 0; i < b.N; i++ { _, _, _ = In(q, []interface{}{"foo", vals[:], "bar"}...) } } func BenchmarkIn1kInt(b *testing.B) { q := `SELECT * FROM foo WHERE x = ? AND v in (?) AND y = ?` var vals [1000]int for i := 0; i < b.N; i++ { _, _, _ = In(q, []interface{}{"foo", vals[:], "bar"}...) } } func BenchmarkIn1kString(b *testing.B) { q := `SELECT * FROM foo WHERE x = ? AND v in (?) AND y = ?` var vals [1000]string for i := 0; i < b.N; i++ { _, _, _ = In(q, []interface{}{"foo", vals[:], "bar"}...) 
} } func BenchmarkRebind(b *testing.B) { b.StopTimer() q1 := `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)` q2 := `INSERT INTO foo (a, b, c) VALUES (?, ?, "foo"), ("Hi", ?, ?)` b.StartTimer() for i := 0; i < b.N; i++ { Rebind(DOLLAR, q1) Rebind(DOLLAR, q2) } } func BenchmarkRebindBuffer(b *testing.B) { b.StopTimer() q1 := `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)` q2 := `INSERT INTO foo (a, b, c) VALUES (?, ?, "foo"), ("Hi", ?, ?)` b.StartTimer() for i := 0; i < b.N; i++ { rebindBuff(DOLLAR, q1) rebindBuff(DOLLAR, q2) } } func TestIn130Regression(t *testing.T) { t.Run("[]interface{}{}", func(t *testing.T) { q, args, err := In("SELECT * FROM people WHERE name IN (?)", []interface{}{[]string{"gopher"}}...) if err != nil { t.Fatal(err) } if q != "SELECT * FROM people WHERE name IN (?)" { t.Errorf("got=%v", q) } t.Log(args) for _, a := range args { switch a := a.(type) { case string: t.Log("ok: string", a) case *string: t.Error("ng: string pointer", a, *a) } } }) t.Run("[]string{}", func(t *testing.T) { q, args, err := In("SELECT * FROM people WHERE name IN (?)", []string{"gopher"}) if err != nil { t.Fatal(err) } if q != "SELECT * FROM people WHERE name IN (?)" { t.Errorf("got=%v", q) } t.Log(args) for _, a := range args { switch a := a.(type) { case string: t.Log("ok: string", a) case *string: t.Error("ng: string pointer", a, *a) } } }) } func TestSelectReset(t *testing.T) { RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T, now string) { loadDefaultFixture(db, t) filledDest := []string{"a", "b", "c"} err := db.Select(&filledDest, "SELECT first_name FROM person ORDER BY first_name ASC;") if err != nil { t.Fatal(err) } if len(filledDest) != 2 { t.Errorf("Expected 2 first names, got %d.", len(filledDest)) } expected := []string{"Jason", "John"} for i, got := range filledDest { if got != expected[i] { t.Errorf("Expected %d result to be %s, but got %s.", i, expected[i], got) } } var emptyDest []string err = db.Select(&emptyDest, "SELECT first_name FROM person WHERE first_name = 'Jack';") if err != nil { t.Fatal(err) } // Verify that selecting 0 rows into a nil target didn't create a // non-nil slice. if emptyDest != nil { t.Error("Expected emptyDest to be nil") } }) } dependencies/pkg/mod/github.com/jmoiron/sqlx@v1.4.0/LICENSE0000644000000000000000000000206515024302467022127 0ustar rootroot Copyright (c) 2013, Jason Moiron Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
dependencies/pkg/mod/github.com/jmoiron/sqlx@v1.4.0/go.mod0000644000000000000000000000023515024302467022225 0ustar rootrootmodule github.com/jmoiron/sqlx go 1.10 require ( github.com/go-sql-driver/mysql v1.8.1 github.com/lib/pq v1.10.9 github.com/mattn/go-sqlite3 v1.14.22 ) dependencies/pkg/mod/github.com/jmoiron/sqlx@v1.4.0/doc.go0000644000000000000000000000106715024302467022217 0ustar rootroot// Package sqlx provides general purpose extensions to database/sql. // // It is intended to seamlessly wrap database/sql and provide convenience // methods which are useful in the development of database driven applications. // None of the underlying database/sql methods are changed. Instead all extended // behavior is implemented through new methods defined on wrapper types. // // Additions include scanning into structs, named query support, rebinding // queries for different drivers, convenient shorthands for common error handling // and more. package sqlx dependencies/pkg/mod/github.com/jmoiron/sqlx@v1.4.0/bind_test.go0000644000000000000000000000300015024302467023412 0ustar rootrootpackage sqlx import ( "math/rand" "testing" ) func oldBindType(driverName string) int { switch driverName { case "postgres", "pgx", "pq-timeouts", "cloudsqlpostgres", "ql": return DOLLAR case "mysql": return QUESTION case "sqlite3": return QUESTION case "oci8", "ora", "goracle", "godror": return NAMED case "sqlserver": return AT } return UNKNOWN } /* sync.Map implementation: goos: linux goarch: amd64 pkg: github.com/jmoiron/sqlx BenchmarkBindSpeed/old-4 100000000 11.0 ns/op BenchmarkBindSpeed/new-4 24575726 50.8 ns/op async.Value map implementation: goos: linux goarch: amd64 pkg: github.com/jmoiron/sqlx BenchmarkBindSpeed/old-4 100000000 11.0 ns/op BenchmarkBindSpeed/new-4 42535839 27.5 ns/op */ func BenchmarkBindSpeed(b *testing.B) { testDrivers := []string{ "postgres", "pgx", "mysql", "sqlite3", "ora", "sqlserver", } b.Run("old", func(b *testing.B) { b.StopTimer() var seq []int for i := 0; i < b.N; i++ { seq = append(seq, rand.Intn(len(testDrivers))) } b.StartTimer() for i := 0; i < b.N; i++ { s := oldBindType(testDrivers[seq[i]]) if s == UNKNOWN { b.Error("unknown driver") } } }) b.Run("new", func(b *testing.B) { b.StopTimer() var seq []int for i := 0; i < b.N; i++ { seq = append(seq, rand.Intn(len(testDrivers))) } b.StartTimer() for i := 0; i < b.N; i++ { s := BindType(testDrivers[seq[i]]) if s == UNKNOWN { b.Error("unknown driver") } } }) } dependencies/pkg/mod/github.com/jmoiron/sqlx@v1.4.0/README.md0000644000000000000000000002067415024302467022407 0ustar rootroot# sqlx [![CircleCI](https://dl.circleci.com/status-badge/img/gh/jmoiron/sqlx/tree/master.svg?style=shield)](https://dl.circleci.com/status-badge/redirect/gh/jmoiron/sqlx/tree/master) [![Coverage Status](https://coveralls.io/repos/github/jmoiron/sqlx/badge.svg?branch=master)](https://coveralls.io/github/jmoiron/sqlx?branch=master) [![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/jmoiron/sqlx) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/jmoiron/sqlx/master/LICENSE) sqlx is a library which provides a set of extensions on go's standard `database/sql` library. The sqlx versions of `sql.DB`, `sql.TX`, `sql.Stmt`, et al. all leave the underlying interfaces untouched, so that their interfaces are a superset on the standard ones. This makes it relatively painless to integrate existing codebases using database/sql with sqlx. 
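As a rough sketch of that drop-in property (the `person` table and the Postgres DSN below are the same illustrative ones used in the usage example later in this README), existing `database/sql` helpers can keep receiving the embedded `*sql.DB`, while new code uses the sqlx extensions on the same handle:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
)

// countPeople only knows about database/sql, as pre-existing code would.
func countPeople(db *sql.DB) (int, error) {
	var n int
	err := db.QueryRow("SELECT count(*) FROM person").Scan(&n)
	return n, err
}

func main() {
	// sqlx.Open mirrors sql.Open; sqlx.Connect additionally pings the server.
	db, err := sqlx.Open("postgres", "user=foo dbname=bar sslmode=disable")
	if err != nil {
		log.Fatalln(err)
	}

	// The embedded *sql.DB can be handed to unmodified database/sql code...
	n, err := countPeople(db.DB)
	if err != nil {
		log.Fatalln(err)
	}

	// ...while the same handle also exposes the sqlx additions.
	var names []string
	if err := db.Select(&names, "SELECT first_name FROM person ORDER BY first_name ASC"); err != nil {
		log.Fatalln(err)
	}
	fmt.Println(n, names)
}
```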
Major additional concepts are: * Marshal rows into structs (with embedded struct support), maps, and slices * Named parameter support including prepared statements * `Get` and `Select` to go quickly from query to struct/slice In addition to the [godoc API documentation](http://godoc.org/github.com/jmoiron/sqlx), there is also some [user documentation](http://jmoiron.github.io/sqlx/) that explains how to use `database/sql` along with sqlx. ## Recent Changes 1.3.0: * `sqlx.DB.Connx(context.Context) *sqlx.Conn` * `sqlx.BindDriver(driverName, bindType)` * support for `[]map[string]interface{}` to do "batch" insertions * allocation & perf improvements for `sqlx.In` DB.Connx returns an `sqlx.Conn`, which is an `sql.Conn`-alike consistent with sqlx's wrapping of other types. `BindDriver` allows users to control the bindvars that sqlx will use for drivers, and add new drivers at runtime. This results in a very slight performance hit when resolving the driver into a bind type (~40ns per call), but it allows users to specify what bindtype their driver uses even when sqlx has not been updated to know about it by default. ### Backwards Compatibility Compatibility with the most recent two versions of Go is a requirement for any new changes. Compatibility beyond that is not guaranteed. Versioning is done with Go modules. Breaking changes (eg. removing deprecated API) will get major version number bumps. ## install go get github.com/jmoiron/sqlx ## issues Row headers can be ambiguous (`SELECT 1 AS a, 2 AS a`), and the result of `Columns()` does not fully qualify column names in queries like: ```sql SELECT a.id, a.name, b.id, b.name FROM foos AS a JOIN foos AS b ON a.parent = b.id; ``` making a struct or map destination ambiguous. Use `AS` in your queries to give columns distinct names, `rows.Scan` to scan them manually, or `SliceScan` to get a slice of results. ## usage Below is an example which shows some common use cases for sqlx. Check [sqlx_test.go](https://github.com/jmoiron/sqlx/blob/master/sqlx_test.go) for more usage. ```go package main import ( "database/sql" "fmt" "log" _ "github.com/lib/pq" "github.com/jmoiron/sqlx" ) var schema = ` CREATE TABLE person ( first_name text, last_name text, email text ); CREATE TABLE place ( country text, city text NULL, telcode integer )` type Person struct { FirstName string `db:"first_name"` LastName string `db:"last_name"` Email string } type Place struct { Country string City sql.NullString TelCode int } func main() { // this Pings the database trying to connect // use sqlx.Open() for sql.Open() semantics db, err := sqlx.Connect("postgres", "user=foo dbname=bar sslmode=disable") if err != nil { log.Fatalln(err) } // exec the schema or fail; multi-statement Exec behavior varies between // database drivers; pq will exec them all, sqlite3 won't, ymmv db.MustExec(schema) tx := db.MustBegin() tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "Jason", "Moiron", "jmoiron@jmoiron.net") tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "John", "Doe", "johndoeDNE@gmail.net") tx.MustExec("INSERT INTO place (country, city, telcode) VALUES ($1, $2, $3)", "United States", "New York", "1") tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Hong Kong", "852") tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Singapore", "65") // Named queries can use structs, so if you have an existing struct (i.e. 
person := &Person{}) that you have populated, you can pass it in as &person tx.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", &Person{"Jane", "Citizen", "jane.citzen@example.com"}) tx.Commit() // Query the database, storing results in a []Person (wrapped in []interface{}) people := []Person{} db.Select(&people, "SELECT * FROM person ORDER BY first_name ASC") jason, john := people[0], people[1] fmt.Printf("%#v\n%#v", jason, john) // Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"} // Person{FirstName:"John", LastName:"Doe", Email:"johndoeDNE@gmail.net"} // You can also get a single result, a la QueryRow jason = Person{} err = db.Get(&jason, "SELECT * FROM person WHERE first_name=$1", "Jason") fmt.Printf("%#v\n", jason) // Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"} // if you have null fields and use SELECT *, you must use sql.Null* in your struct places := []Place{} err = db.Select(&places, "SELECT * FROM place ORDER BY telcode ASC") if err != nil { fmt.Println(err) return } usa, singsing, honkers := places[0], places[1], places[2] fmt.Printf("%#v\n%#v\n%#v\n", usa, singsing, honkers) // Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1} // Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65} // Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852} // Loop through rows using only one struct place := Place{} rows, err := db.Queryx("SELECT * FROM place") for rows.Next() { err := rows.StructScan(&place) if err != nil { log.Fatalln(err) } fmt.Printf("%#v\n", place) } // Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1} // Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852} // Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65} // Named queries, using `:name` as the bindvar. Automatic bindvar support // which takes into account the dbtype based on the driverName on sqlx.Open/Connect _, err = db.NamedExec(`INSERT INTO person (first_name,last_name,email) VALUES (:first,:last,:email)`, map[string]interface{}{ "first": "Bin", "last": "Smuth", "email": "bensmith@allblacks.nz", }) // Selects Mr. Smith from the database rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:fn`, map[string]interface{}{"fn": "Bin"}) // Named queries can also use structs. Their bind names follow the same rules // as the name -> db mapping, so struct fields are lowercased and the `db` tag // is taken into consideration. 
rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:first_name`, jason) // batch insert // batch insert with structs personStructs := []Person{ {FirstName: "Ardie", LastName: "Savea", Email: "asavea@ab.co.nz"}, {FirstName: "Sonny Bill", LastName: "Williams", Email: "sbw@ab.co.nz"}, {FirstName: "Ngani", LastName: "Laumape", Email: "nlaumape@ab.co.nz"}, } _, err = db.NamedExec(`INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)`, personStructs) // batch insert with maps personMaps := []map[string]interface{}{ {"first_name": "Ardie", "last_name": "Savea", "email": "asavea@ab.co.nz"}, {"first_name": "Sonny Bill", "last_name": "Williams", "email": "sbw@ab.co.nz"}, {"first_name": "Ngani", "last_name": "Laumape", "email": "nlaumape@ab.co.nz"}, } _, err = db.NamedExec(`INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)`, personMaps) } ``` dependencies/pkg/mod/github.com/jmoiron/sqlx@v1.4.0/.circleci/0000755000000000000000000000000015024302467022752 5ustar rootrootdependencies/pkg/mod/github.com/jmoiron/sqlx@v1.4.0/.circleci/config.yml0000644000000000000000000000631615024302467024750 0ustar rootrootversion: 2.1 "-": &go-versions [ "1.18.10", "1.19.13", "1.20.14", "1.21.9", "1.22.2" ] executors: go_executor: parameters: version: type: string docker: - image: cimg/go:<< parameters.version >> jobs: test: parameters: go_version: type: string executor: name: go_executor version: << parameters.go_version >> steps: - checkout - restore_cache: keys: - go-mod-v4-{{ checksum "go.sum" }} - run: name: Install Dependencies command: go mod download - save_cache: key: go-mod-v4-{{ checksum "go.sum" }} paths: - "/go/pkg/mod" - run: name: Run tests command: | mkdir -p /tmp/test-reports gotestsum --junitfile /tmp/test-reports/unit-tests.xml - store_test_results: path: /tmp/test-reports test-race: parameters: go_version: type: string executor: name: go_executor version: << parameters.go_version >> steps: - checkout - restore_cache: keys: - go-mod-v4-{{ checksum "go.sum" }} - run: name: Install Dependencies command: go mod download - save_cache: key: go-mod-v4-{{ checksum "go.sum" }} paths: - "/go/pkg/mod" - run: name: Run tests with race detector command: make test-race lint: parameters: go_version: type: string executor: name: go_executor version: << parameters.go_version >> steps: - checkout - restore_cache: keys: - go-mod-v4-{{ checksum "go.sum" }} - run: name: Install Dependencies command: go mod download - run: name: Install tooling command: | make tooling - save_cache: key: go-mod-v4-{{ checksum "go.sum" }} paths: - "/go/pkg/mod" - run: name: Linting command: make lint - run: name: Running vulncheck command: make vuln-check fmt: parameters: go_version: type: string executor: name: go_executor version: << parameters.go_version >> steps: - checkout - restore_cache: keys: - go-mod-v4-{{ checksum "go.sum" }} - run: name: Install Dependencies command: go mod download - run: name: Install tooling command: | make tooling - save_cache: key: go-mod-v4-{{ checksum "go.sum" }} paths: - "/go/pkg/mod" - run: name: Running formatting command: | make fmt make has-changes workflows: version: 2 build-and-test: jobs: - test: matrix: parameters: go_version: *go-versions - test-race: matrix: parameters: go_version: *go-versions - lint: matrix: parameters: go_version: *go-versions - fmt: matrix: parameters: go_version: *go-versions 
dependencies/pkg/mod/github.com/jmoiron/sqlx@v1.4.0/named_context.go0000644000000000000000000001066715024302467024310 0ustar rootroot//go:build go1.8 // +build go1.8 package sqlx import ( "context" "database/sql" ) // A union interface of contextPreparer and binder, required to be able to // prepare named statements with context (as the bindtype must be determined). type namedPreparerContext interface { PreparerContext binder } func prepareNamedContext(ctx context.Context, p namedPreparerContext, query string) (*NamedStmt, error) { bindType := BindType(p.DriverName()) q, args, err := compileNamedQuery([]byte(query), bindType) if err != nil { return nil, err } stmt, err := PreparexContext(ctx, p, q) if err != nil { return nil, err } return &NamedStmt{ QueryString: q, Params: args, Stmt: stmt, }, nil } // ExecContext executes a named statement using the struct passed. // Any named placeholder parameters are replaced with fields from arg. func (n *NamedStmt) ExecContext(ctx context.Context, arg interface{}) (sql.Result, error) { args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) if err != nil { return *new(sql.Result), err } return n.Stmt.ExecContext(ctx, args...) } // QueryContext executes a named statement using the struct argument, returning rows. // Any named placeholder parameters are replaced with fields from arg. func (n *NamedStmt) QueryContext(ctx context.Context, arg interface{}) (*sql.Rows, error) { args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) if err != nil { return nil, err } return n.Stmt.QueryContext(ctx, args...) } // QueryRowContext executes a named statement against the database. Because sqlx cannot // create a *sql.Row with an error condition pre-set for binding errors, sqlx // returns a *sqlx.Row instead. // Any named placeholder parameters are replaced with fields from arg. func (n *NamedStmt) QueryRowContext(ctx context.Context, arg interface{}) *Row { args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) if err != nil { return &Row{err: err} } return n.Stmt.QueryRowxContext(ctx, args...) } // MustExecContext execs a NamedStmt, panicing on error // Any named placeholder parameters are replaced with fields from arg. func (n *NamedStmt) MustExecContext(ctx context.Context, arg interface{}) sql.Result { res, err := n.ExecContext(ctx, arg) if err != nil { panic(err) } return res } // QueryxContext using this NamedStmt // Any named placeholder parameters are replaced with fields from arg. func (n *NamedStmt) QueryxContext(ctx context.Context, arg interface{}) (*Rows, error) { r, err := n.QueryContext(ctx, arg) if err != nil { return nil, err } return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err } // QueryRowxContext this NamedStmt. Because of limitations with QueryRow, this is // an alias for QueryRow. // Any named placeholder parameters are replaced with fields from arg. func (n *NamedStmt) QueryRowxContext(ctx context.Context, arg interface{}) *Row { return n.QueryRowContext(ctx, arg) } // SelectContext using this NamedStmt // Any named placeholder parameters are replaced with fields from arg. func (n *NamedStmt) SelectContext(ctx context.Context, dest interface{}, arg interface{}) error { rows, err := n.QueryxContext(ctx, arg) if err != nil { return err } // if something happens here, we want to make sure the rows are Closed defer rows.Close() return scanAll(rows, dest, false) } // GetContext using this NamedStmt // Any named placeholder parameters are replaced with fields from arg. 
func (n *NamedStmt) GetContext(ctx context.Context, dest interface{}, arg interface{}) error { r := n.QueryRowxContext(ctx, arg) return r.scanAny(dest, false) } // NamedQueryContext binds a named query and then runs Query on the result using the // provided Ext (sqlx.Tx, sqlx.DB). It works with both structs and with // map[string]interface{} types. func NamedQueryContext(ctx context.Context, e ExtContext, query string, arg interface{}) (*Rows, error) { q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e)) if err != nil { return nil, err } return e.QueryxContext(ctx, q, args...) } // NamedExecContext uses BindStruct to get a query executable by the driver and // then runs Exec on the result. Returns an error from the binding // or the query execution itself. func NamedExecContext(ctx context.Context, e ExtContext, query string, arg interface{}) (sql.Result, error) { q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e)) if err != nil { return nil, err } return e.ExecContext(ctx, q, args...) } dependencies/pkg/mod/github.com/jmoiron/sqlx@v1.4.0/reflectx/0000755000000000000000000000000015024302467022733 5ustar rootrootdependencies/pkg/mod/github.com/jmoiron/sqlx@v1.4.0/reflectx/README.md0000644000000000000000000000125715024302467024217 0ustar rootroot# reflectx The sqlx package has special reflect needs. In particular, it needs to: * be able to map a name to a field * understand embedded structs * understand mapping names to fields by a particular tag * user-specified name -> field mapping functions These behaviors mimic those of the standard library marshallers and also the behavior of standard Go accessors. The first two are amply taken care of by `reflect.Value.FieldByName`, and the third is addressed by `reflect.Value.FieldByNameFunc`, but these don't quite understand struct tags in the ways that are vital to most marshallers, and they are slow. This reflectx package extends reflect to achieve these goals.
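As a rough, self-contained sketch of those goals (the struct, tags, and values below are invented for illustration and are not taken from the package's tests), a `Mapper` built with `NewMapperFunc` resolves tag-mapped and embedded fields by name:

```go
package main

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/jmoiron/sqlx/reflectx"
)

type Person struct {
	FirstName string `db:"first_name"`
}

type Employee struct {
	Person     // embedded struct; its fields are reachable by their own mapped names
	ID     int `db:"id"`
}

func main() {
	// Map names via the "db" tag, falling back to strings.ToLower on the field name.
	m := reflectx.NewMapperFunc("db", strings.ToLower)

	e := Employee{Person: Person{FirstName: "Jane"}, ID: 7}
	v := reflect.ValueOf(e)

	// Tag-mapped lookup, including a field promoted from the embedded struct.
	fmt.Println(m.FieldByName(v, "first_name").Interface()) // Jane
	fmt.Println(m.FieldByName(v, "id").Interface())         // 7
}
```

Fields without a tag fall back to the lowercased field name here, which matches the default `NameMapper` that sqlx itself configures.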
dependencies/pkg/mod/github.com/jmoiron/sqlx@v1.4.0/reflectx/reflect_test.go0000644000000000000000000005170215024302467025752 0ustar rootrootpackage reflectx import ( "reflect" "strings" "testing" ) func ival(v reflect.Value) int { return v.Interface().(int) } func TestBasic(t *testing.T) { type Foo struct { A int B int C int } f := Foo{1, 2, 3} fv := reflect.ValueOf(f) m := NewMapperFunc("", func(s string) string { return s }) v := m.FieldByName(fv, "A") if ival(v) != f.A { t.Errorf("Expecting %d, got %d", ival(v), f.A) } v = m.FieldByName(fv, "B") if ival(v) != f.B { t.Errorf("Expecting %d, got %d", f.B, ival(v)) } v = m.FieldByName(fv, "C") if ival(v) != f.C { t.Errorf("Expecting %d, got %d", f.C, ival(v)) } } func TestBasicEmbedded(t *testing.T) { type Foo struct { A int } type Bar struct { Foo // `db:""` is implied for an embedded struct B int C int `db:"-"` } type Baz struct { A int Bar `db:"Bar"` } m := NewMapperFunc("db", func(s string) string { return s }) z := Baz{} z.A = 1 z.B = 2 z.C = 4 z.Bar.Foo.A = 3 zv := reflect.ValueOf(z) fields := m.TypeMap(reflect.TypeOf(z)) if len(fields.Index) != 5 { t.Errorf("Expecting 5 fields") } // for _, fi := range fields.Index { // log.Println(fi) // } v := m.FieldByName(zv, "A") if ival(v) != z.A { t.Errorf("Expecting %d, got %d", z.A, ival(v)) } v = m.FieldByName(zv, "Bar.B") if ival(v) != z.Bar.B { t.Errorf("Expecting %d, got %d", z.Bar.B, ival(v)) } v = m.FieldByName(zv, "Bar.A") if ival(v) != z.Bar.Foo.A { t.Errorf("Expecting %d, got %d", z.Bar.Foo.A, ival(v)) } v = m.FieldByName(zv, "Bar.C") if _, ok := v.Interface().(int); ok { t.Errorf("Expecting Bar.C to not exist") } fi := fields.GetByPath("Bar.C") if fi != nil { t.Errorf("Bar.C should not exist") } } func TestEmbeddedSimple(t *testing.T) { type UUID [16]byte type MyID struct { UUID } type Item struct { ID MyID } z := Item{} m := NewMapper("db") m.TypeMap(reflect.TypeOf(z)) } func TestBasicEmbeddedWithTags(t *testing.T) { type Foo struct { A int `db:"a"` } type Bar struct { Foo // `db:""` is implied for an embedded struct B int `db:"b"` } type Baz struct { A int `db:"a"` Bar // `db:""` is implied for an embedded struct } m := NewMapper("db") z := Baz{} z.A = 1 z.B = 2 z.Bar.Foo.A = 3 zv := reflect.ValueOf(z) fields := m.TypeMap(reflect.TypeOf(z)) if len(fields.Index) != 5 { t.Errorf("Expecting 5 fields") } // for _, fi := range fields.index { // log.Println(fi) // } v := m.FieldByName(zv, "a") if ival(v) != z.A { // the dominant field t.Errorf("Expecting %d, got %d", z.A, ival(v)) } v = m.FieldByName(zv, "b") if ival(v) != z.B { t.Errorf("Expecting %d, got %d", z.B, ival(v)) } } func TestBasicEmbeddedWithSameName(t *testing.T) { type Foo struct { A int `db:"a"` Foo int `db:"Foo"` // Same name as the embedded struct } type FooExt struct { Foo B int `db:"b"` } m := NewMapper("db") z := FooExt{} z.A = 1 z.B = 2 z.Foo.Foo = 3 zv := reflect.ValueOf(z) fields := m.TypeMap(reflect.TypeOf(z)) if len(fields.Index) != 4 { t.Errorf("Expecting 3 fields, found %d", len(fields.Index)) } v := m.FieldByName(zv, "a") if ival(v) != z.A { // the dominant field t.Errorf("Expecting %d, got %d", z.A, ival(v)) } v = m.FieldByName(zv, "b") if ival(v) != z.B { t.Errorf("Expecting %d, got %d", z.B, ival(v)) } v = m.FieldByName(zv, "Foo") if ival(v) != z.Foo.Foo { t.Errorf("Expecting %d, got %d", z.Foo.Foo, ival(v)) } } func TestFlatTags(t *testing.T) { m := NewMapper("db") type Asset struct { Title string `db:"title"` } type Post struct { Author string `db:"author,required"` Asset Asset `db:""` } // Post 
columns: (author title) post := Post{Author: "Joe", Asset: Asset{Title: "Hello"}} pv := reflect.ValueOf(post) v := m.FieldByName(pv, "author") if v.Interface().(string) != post.Author { t.Errorf("Expecting %s, got %s", post.Author, v.Interface().(string)) } v = m.FieldByName(pv, "title") if v.Interface().(string) != post.Asset.Title { t.Errorf("Expecting %s, got %s", post.Asset.Title, v.Interface().(string)) } } func TestNestedStruct(t *testing.T) { m := NewMapper("db") type Details struct { Active bool `db:"active"` } type Asset struct { Title string `db:"title"` Details Details `db:"details"` } type Post struct { Author string `db:"author,required"` Asset `db:"asset"` } // Post columns: (author asset.title asset.details.active) post := Post{ Author: "Joe", Asset: Asset{Title: "Hello", Details: Details{Active: true}}, } pv := reflect.ValueOf(post) v := m.FieldByName(pv, "author") if v.Interface().(string) != post.Author { t.Errorf("Expecting %s, got %s", post.Author, v.Interface().(string)) } v = m.FieldByName(pv, "title") if _, ok := v.Interface().(string); ok { t.Errorf("Expecting field to not exist") } v = m.FieldByName(pv, "asset.title") if v.Interface().(string) != post.Asset.Title { t.Errorf("Expecting %s, got %s", post.Asset.Title, v.Interface().(string)) } v = m.FieldByName(pv, "asset.details.active") if v.Interface().(bool) != post.Asset.Details.Active { t.Errorf("Expecting %v, got %v", post.Asset.Details.Active, v.Interface().(bool)) } } func TestInlineStruct(t *testing.T) { m := NewMapperTagFunc("db", strings.ToLower, nil) type Employee struct { Name string ID int } type Boss Employee type person struct { Employee `db:"employee"` Boss `db:"boss"` } // employees columns: (employee.name employee.id boss.name boss.id) em := person{Employee: Employee{Name: "Joe", ID: 2}, Boss: Boss{Name: "Dick", ID: 1}} ev := reflect.ValueOf(em) fields := m.TypeMap(reflect.TypeOf(em)) if len(fields.Index) != 6 { t.Errorf("Expecting 6 fields") } v := m.FieldByName(ev, "employee.name") if v.Interface().(string) != em.Employee.Name { t.Errorf("Expecting %s, got %s", em.Employee.Name, v.Interface().(string)) } v = m.FieldByName(ev, "boss.id") if ival(v) != em.Boss.ID { t.Errorf("Expecting %v, got %v", em.Boss.ID, ival(v)) } } func TestRecursiveStruct(t *testing.T) { type Person struct { Parent *Person } m := NewMapperFunc("db", strings.ToLower) var p *Person m.TypeMap(reflect.TypeOf(p)) } func TestFieldsEmbedded(t *testing.T) { m := NewMapper("db") type Person struct { Name string `db:"name,size=64"` } type Place struct { Name string `db:"name"` } type Article struct { Title string `db:"title"` } type PP struct { Person `db:"person,required"` Place `db:",someflag"` Article `db:",required"` } // PP columns: (person.name name title) pp := PP{} pp.Person.Name = "Peter" pp.Place.Name = "Toronto" pp.Article.Title = "Best city ever" fields := m.TypeMap(reflect.TypeOf(pp)) // for i, f := range fields { // log.Println(i, f) // } ppv := reflect.ValueOf(pp) v := m.FieldByName(ppv, "person.name") if v.Interface().(string) != pp.Person.Name { t.Errorf("Expecting %s, got %s", pp.Person.Name, v.Interface().(string)) } v = m.FieldByName(ppv, "name") if v.Interface().(string) != pp.Place.Name { t.Errorf("Expecting %s, got %s", pp.Place.Name, v.Interface().(string)) } v = m.FieldByName(ppv, "title") if v.Interface().(string) != pp.Article.Title { t.Errorf("Expecting %s, got %s", pp.Article.Title, v.Interface().(string)) } fi := fields.GetByPath("person") if _, ok := fi.Options["required"]; !ok { t.Errorf("Expecting 
required option to be set") } if !fi.Embedded { t.Errorf("Expecting field to be embedded") } if len(fi.Index) != 1 || fi.Index[0] != 0 { t.Errorf("Expecting index to be [0]") } fi = fields.GetByPath("person.name") if fi == nil { t.Fatal("Expecting person.name to exist") } if fi.Path != "person.name" { t.Errorf("Expecting %s, got %s", "person.name", fi.Path) } if fi.Options["size"] != "64" { t.Errorf("Expecting %s, got %s", "64", fi.Options["size"]) } fi = fields.GetByTraversal([]int{1, 0}) if fi == nil { t.Fatal("Expecting traversal to exist") } if fi.Path != "name" { t.Errorf("Expecting %s, got %s", "name", fi.Path) } fi = fields.GetByTraversal([]int{2}) if fi == nil { t.Fatal("Expecting traversal to exist") } if _, ok := fi.Options["required"]; !ok { t.Errorf("Expecting required option to be set") } trs := m.TraversalsByName(reflect.TypeOf(pp), []string{"person.name", "name", "title"}) if !reflect.DeepEqual(trs, [][]int{{0, 0}, {1, 0}, {2, 0}}) { t.Errorf("Expecting traversal: %v", trs) } } func TestPtrFields(t *testing.T) { m := NewMapperTagFunc("db", strings.ToLower, nil) type Asset struct { Title string } type Post struct { *Asset `db:"asset"` Author string } post := &Post{Author: "Joe", Asset: &Asset{Title: "Hiyo"}} pv := reflect.ValueOf(post) fields := m.TypeMap(reflect.TypeOf(post)) if len(fields.Index) != 3 { t.Errorf("Expecting 3 fields") } v := m.FieldByName(pv, "asset.title") if v.Interface().(string) != post.Asset.Title { t.Errorf("Expecting %s, got %s", post.Asset.Title, v.Interface().(string)) } v = m.FieldByName(pv, "author") if v.Interface().(string) != post.Author { t.Errorf("Expecting %s, got %s", post.Author, v.Interface().(string)) } } func TestNamedPtrFields(t *testing.T) { m := NewMapperTagFunc("db", strings.ToLower, nil) type User struct { Name string } type Asset struct { Title string Owner *User `db:"owner"` } type Post struct { Author string Asset1 *Asset `db:"asset1"` Asset2 *Asset `db:"asset2"` } post := &Post{Author: "Joe", Asset1: &Asset{Title: "Hiyo", Owner: &User{"Username"}}} // Let Asset2 be nil pv := reflect.ValueOf(post) fields := m.TypeMap(reflect.TypeOf(post)) if len(fields.Index) != 9 { t.Errorf("Expecting 9 fields") } v := m.FieldByName(pv, "asset1.title") if v.Interface().(string) != post.Asset1.Title { t.Errorf("Expecting %s, got %s", post.Asset1.Title, v.Interface().(string)) } v = m.FieldByName(pv, "asset1.owner.name") if v.Interface().(string) != post.Asset1.Owner.Name { t.Errorf("Expecting %s, got %s", post.Asset1.Owner.Name, v.Interface().(string)) } v = m.FieldByName(pv, "asset2.title") if v.Interface().(string) != post.Asset2.Title { t.Errorf("Expecting %s, got %s", post.Asset2.Title, v.Interface().(string)) } v = m.FieldByName(pv, "asset2.owner.name") if v.Interface().(string) != post.Asset2.Owner.Name { t.Errorf("Expecting %s, got %s", post.Asset2.Owner.Name, v.Interface().(string)) } v = m.FieldByName(pv, "author") if v.Interface().(string) != post.Author { t.Errorf("Expecting %s, got %s", post.Author, v.Interface().(string)) } } func TestFieldMap(t *testing.T) { type Foo struct { A int B int C int } f := Foo{1, 2, 3} m := NewMapperFunc("db", strings.ToLower) fm := m.FieldMap(reflect.ValueOf(f)) if len(fm) != 3 { t.Errorf("Expecting %d keys, got %d", 3, len(fm)) } if fm["a"].Interface().(int) != 1 { t.Errorf("Expecting %d, got %d", 1, ival(fm["a"])) } if fm["b"].Interface().(int) != 2 { t.Errorf("Expecting %d, got %d", 2, ival(fm["b"])) } if fm["c"].Interface().(int) != 3 { t.Errorf("Expecting %d, got %d", 3, ival(fm["c"])) } } func 
TestTagNameMapping(t *testing.T) { type Strategy struct { StrategyID string `protobuf:"bytes,1,opt,name=strategy_id" json:"strategy_id,omitempty"` StrategyName string } m := NewMapperTagFunc("json", strings.ToUpper, func(value string) string { if strings.Contains(value, ",") { return strings.Split(value, ",")[0] } return value }) strategy := Strategy{"1", "Alpah"} mapping := m.TypeMap(reflect.TypeOf(strategy)) for _, key := range []string{"strategy_id", "STRATEGYNAME"} { if fi := mapping.GetByPath(key); fi == nil { t.Errorf("Expecting to find key %s in mapping but did not.", key) } } } func TestMapping(t *testing.T) { type Person struct { ID int Name string WearsGlasses bool `db:"wears_glasses"` } m := NewMapperFunc("db", strings.ToLower) p := Person{1, "Jason", true} mapping := m.TypeMap(reflect.TypeOf(p)) for _, key := range []string{"id", "name", "wears_glasses"} { if fi := mapping.GetByPath(key); fi == nil { t.Errorf("Expecting to find key %s in mapping but did not.", key) } } type SportsPerson struct { Weight int Age int Person } s := SportsPerson{Weight: 100, Age: 30, Person: p} mapping = m.TypeMap(reflect.TypeOf(s)) for _, key := range []string{"id", "name", "wears_glasses", "weight", "age"} { if fi := mapping.GetByPath(key); fi == nil { t.Errorf("Expecting to find key %s in mapping but did not.", key) } } type RugbyPlayer struct { Position int IsIntense bool `db:"is_intense"` IsAllBlack bool `db:"-"` SportsPerson } r := RugbyPlayer{12, true, false, s} mapping = m.TypeMap(reflect.TypeOf(r)) for _, key := range []string{"id", "name", "wears_glasses", "weight", "age", "position", "is_intense"} { if fi := mapping.GetByPath(key); fi == nil { t.Errorf("Expecting to find key %s in mapping but did not.", key) } } if fi := mapping.GetByPath("isallblack"); fi != nil { t.Errorf("Expecting to ignore `IsAllBlack` field") } } func TestGetByTraversal(t *testing.T) { type C struct { C0 int C1 int } type B struct { B0 string B1 *C } type A struct { A0 int A1 B } testCases := []struct { Index []int ExpectedName string ExpectNil bool }{ { Index: []int{0}, ExpectedName: "A0", }, { Index: []int{1, 0}, ExpectedName: "B0", }, { Index: []int{1, 1, 1}, ExpectedName: "C1", }, { Index: []int{3, 4, 5}, ExpectNil: true, }, { Index: []int{}, ExpectNil: true, }, { Index: nil, ExpectNil: true, }, } m := NewMapperFunc("db", func(n string) string { return n }) tm := m.TypeMap(reflect.TypeOf(A{})) for i, tc := range testCases { fi := tm.GetByTraversal(tc.Index) if tc.ExpectNil { if fi != nil { t.Errorf("%d: expected nil, got %v", i, fi) } continue } if fi == nil { t.Errorf("%d: expected %s, got nil", i, tc.ExpectedName) continue } if fi.Name != tc.ExpectedName { t.Errorf("%d: expected %s, got %s", i, tc.ExpectedName, fi.Name) } } } // TestMapperMethodsByName tests Mapper methods FieldByName and TraversalsByName func TestMapperMethodsByName(t *testing.T) { type C struct { C0 string C1 int } type B struct { B0 *C `db:"B0"` B1 C `db:"B1"` B2 string `db:"B2"` } type A struct { A0 *B `db:"A0"` B `db:"A1"` A2 int } val := &A{ A0: &B{ B0: &C{C0: "0", C1: 1}, B1: C{C0: "2", C1: 3}, B2: "4", }, B: B{ B0: nil, B1: C{C0: "5", C1: 6}, B2: "7", }, A2: 8, } testCases := []struct { Name string ExpectInvalid bool ExpectedValue interface{} ExpectedIndexes []int }{ { Name: "A0.B0.C0", ExpectedValue: "0", ExpectedIndexes: []int{0, 0, 0}, }, { Name: "A0.B0.C1", ExpectedValue: 1, ExpectedIndexes: []int{0, 0, 1}, }, { Name: "A0.B1.C0", ExpectedValue: "2", ExpectedIndexes: []int{0, 1, 0}, }, { Name: "A0.B1.C1", ExpectedValue: 3, 
ExpectedIndexes: []int{0, 1, 1}, }, { Name: "A0.B2", ExpectedValue: "4", ExpectedIndexes: []int{0, 2}, }, { Name: "A1.B0.C0", ExpectedValue: "", ExpectedIndexes: []int{1, 0, 0}, }, { Name: "A1.B0.C1", ExpectedValue: 0, ExpectedIndexes: []int{1, 0, 1}, }, { Name: "A1.B1.C0", ExpectedValue: "5", ExpectedIndexes: []int{1, 1, 0}, }, { Name: "A1.B1.C1", ExpectedValue: 6, ExpectedIndexes: []int{1, 1, 1}, }, { Name: "A1.B2", ExpectedValue: "7", ExpectedIndexes: []int{1, 2}, }, { Name: "A2", ExpectedValue: 8, ExpectedIndexes: []int{2}, }, { Name: "XYZ", ExpectInvalid: true, ExpectedIndexes: []int{}, }, { Name: "a3", ExpectInvalid: true, ExpectedIndexes: []int{}, }, } // build the names array from the test cases names := make([]string, len(testCases)) for i, tc := range testCases { names[i] = tc.Name } m := NewMapperFunc("db", func(n string) string { return n }) v := reflect.ValueOf(val) values := m.FieldsByName(v, names) if len(values) != len(testCases) { t.Errorf("expected %d values, got %d", len(testCases), len(values)) t.FailNow() } indexes := m.TraversalsByName(v.Type(), names) if len(indexes) != len(testCases) { t.Errorf("expected %d traversals, got %d", len(testCases), len(indexes)) t.FailNow() } for i, val := range values { tc := testCases[i] traversal := indexes[i] if !reflect.DeepEqual(tc.ExpectedIndexes, traversal) { t.Errorf("expected %v, got %v", tc.ExpectedIndexes, traversal) t.FailNow() } val = reflect.Indirect(val) if tc.ExpectInvalid { if val.IsValid() { t.Errorf("%d: expected zero value, got %v", i, val) } continue } if !val.IsValid() { t.Errorf("%d: expected valid value, got %v", i, val) continue } actualValue := reflect.Indirect(val).Interface() if !reflect.DeepEqual(tc.ExpectedValue, actualValue) { t.Errorf("%d: expected %v, got %v", i, tc.ExpectedValue, actualValue) } } } func TestFieldByIndexes(t *testing.T) { type C struct { C0 bool C1 string C2 int C3 map[string]int } type B struct { B1 C B2 *C } type A struct { A1 B A2 *B } testCases := []struct { value interface{} indexes []int expectedValue interface{} readOnly bool }{ { value: A{ A1: B{B1: C{C0: true}}, }, indexes: []int{0, 0, 0}, expectedValue: true, readOnly: true, }, { value: A{ A2: &B{B2: &C{C1: "answer"}}, }, indexes: []int{1, 1, 1}, expectedValue: "answer", readOnly: true, }, { value: &A{}, indexes: []int{1, 1, 3}, expectedValue: map[string]int{}, }, } for i, tc := range testCases { checkResults := func(v reflect.Value) { if tc.expectedValue == nil { if !v.IsNil() { t.Errorf("%d: expected nil, actual %v", i, v.Interface()) } } else { if !reflect.DeepEqual(tc.expectedValue, v.Interface()) { t.Errorf("%d: expected %v, actual %v", i, tc.expectedValue, v.Interface()) } } } checkResults(FieldByIndexes(reflect.ValueOf(tc.value), tc.indexes)) if tc.readOnly { checkResults(FieldByIndexesReadOnly(reflect.ValueOf(tc.value), tc.indexes)) } } } func TestMustBe(t *testing.T) { typ := reflect.TypeOf(E1{}) mustBe(typ, reflect.Struct) defer func() { if r := recover(); r != nil { valueErr, ok := r.(*reflect.ValueError) if !ok { t.Errorf("unexpected Method: %s", valueErr.Method) t.Fatal("expected panic with *reflect.ValueError") } if valueErr.Method != "github.com/jmoiron/sqlx/reflectx.TestMustBe" { t.Fatalf("unexpected Method: %s", valueErr.Method) } if valueErr.Kind != reflect.String { t.Fatalf("unexpected Kind: %s", valueErr.Kind) } } else { t.Fatal("expected panic") } }() typ = reflect.TypeOf("string") mustBe(typ, reflect.Struct) t.Fatal("got here, didn't expect to") } type E1 struct { A int } type E2 struct { E1 B int } type 
E3 struct { E2 C int } type E4 struct { E3 D int } func BenchmarkFieldNameL1(b *testing.B) { e4 := E4{D: 1} for i := 0; i < b.N; i++ { v := reflect.ValueOf(e4) f := v.FieldByName("D") if f.Interface().(int) != 1 { b.Fatal("Wrong value.") } } } func BenchmarkFieldNameL4(b *testing.B) { e4 := E4{} e4.A = 1 for i := 0; i < b.N; i++ { v := reflect.ValueOf(e4) f := v.FieldByName("A") if f.Interface().(int) != 1 { b.Fatal("Wrong value.") } } } func BenchmarkFieldPosL1(b *testing.B) { e4 := E4{D: 1} for i := 0; i < b.N; i++ { v := reflect.ValueOf(e4) f := v.Field(1) if f.Interface().(int) != 1 { b.Fatal("Wrong value.") } } } func BenchmarkFieldPosL4(b *testing.B) { e4 := E4{} e4.A = 1 for i := 0; i < b.N; i++ { v := reflect.ValueOf(e4) f := v.Field(0) f = f.Field(0) f = f.Field(0) f = f.Field(0) if f.Interface().(int) != 1 { b.Fatal("Wrong value.") } } } func BenchmarkFieldByIndexL4(b *testing.B) { e4 := E4{} e4.A = 1 idx := []int{0, 0, 0, 0} for i := 0; i < b.N; i++ { v := reflect.ValueOf(e4) f := FieldByIndexes(v, idx) if f.Interface().(int) != 1 { b.Fatal("Wrong value.") } } } func BenchmarkTraversalsByName(b *testing.B) { type A struct { Value int } type B struct { A A } type C struct { B B } type D struct { C C } m := NewMapper("") t := reflect.TypeOf(D{}) names := []string{"C", "B", "A", "Value"} b.ResetTimer() for i := 0; i < b.N; i++ { if l := len(m.TraversalsByName(t, names)); l != len(names) { b.Errorf("expected %d values, got %d", len(names), l) } } } func BenchmarkTraversalsByNameFunc(b *testing.B) { type A struct { Z int } type B struct { A A } type C struct { B B } type D struct { C C } m := NewMapper("") t := reflect.TypeOf(D{}) names := []string{"C", "B", "A", "Z", "Y"} b.ResetTimer() for i := 0; i < b.N; i++ { var l int if err := m.TraversalsByNameFunc(t, names, func(_ int, _ []int) error { l++ return nil }); err != nil { b.Errorf("unexpected error %s", err) } if l != len(names) { b.Errorf("expected %d values, got %d", len(names), l) } } } dependencies/pkg/mod/github.com/jmoiron/sqlx@v1.4.0/reflectx/reflect.go0000644000000000000000000003045715024302467024717 0ustar rootroot// Package reflectx implements extensions to the standard reflect lib suitable // for implementing marshalling and unmarshalling packages. The main Mapper type // allows for Go-compatible named attribute access, including accessing embedded // struct attributes and the ability to use functions and struct tags to // customize field names. package reflectx import ( "reflect" "runtime" "strings" "sync" ) // A FieldInfo is metadata for a struct field. type FieldInfo struct { Index []int Path string Field reflect.StructField Zero reflect.Value Name string Options map[string]string Embedded bool Children []*FieldInfo Parent *FieldInfo } // A StructMap is an index of field metadata for a struct. type StructMap struct { Tree *FieldInfo Index []*FieldInfo Paths map[string]*FieldInfo Names map[string]*FieldInfo } // GetByPath returns a *FieldInfo for a given string path. func (f StructMap) GetByPath(path string) *FieldInfo { return f.Paths[path] } // GetByTraversal returns a *FieldInfo for a given integer path. It is // analogous to reflect.FieldByIndex, but using the cached traversal // rather than re-executing the reflect machinery each time. 
func (f StructMap) GetByTraversal(index []int) *FieldInfo { if len(index) == 0 { return nil } tree := f.Tree for _, i := range index { if i >= len(tree.Children) || tree.Children[i] == nil { return nil } tree = tree.Children[i] } return tree } // Mapper is a general purpose mapper of names to struct fields. A Mapper // behaves like most marshallers in the standard library, obeying a field tag // for name mapping but also providing a basic transform function. type Mapper struct { cache map[reflect.Type]*StructMap tagName string tagMapFunc func(string) string mapFunc func(string) string mutex sync.Mutex } // NewMapper returns a new mapper using the tagName as its struct field tag. // If tagName is the empty string, it is ignored. func NewMapper(tagName string) *Mapper { return &Mapper{ cache: make(map[reflect.Type]*StructMap), tagName: tagName, } } // NewMapperTagFunc returns a new mapper which contains a mapper for field names // AND a mapper for tag values. This is useful for tags like json which can // have values like "name,omitempty". func NewMapperTagFunc(tagName string, mapFunc, tagMapFunc func(string) string) *Mapper { return &Mapper{ cache: make(map[reflect.Type]*StructMap), tagName: tagName, mapFunc: mapFunc, tagMapFunc: tagMapFunc, } } // NewMapperFunc returns a new mapper which optionally obeys a field tag and // a struct field name mapper func given by f. Tags will take precedence, but // for any other field, the mapped name will be f(field.Name) func NewMapperFunc(tagName string, f func(string) string) *Mapper { return &Mapper{ cache: make(map[reflect.Type]*StructMap), tagName: tagName, mapFunc: f, } } // TypeMap returns a mapping of field strings to int slices representing // the traversal down the struct to reach the field. func (m *Mapper) TypeMap(t reflect.Type) *StructMap { m.mutex.Lock() mapping, ok := m.cache[t] if !ok { mapping = getMapping(t, m.tagName, m.mapFunc, m.tagMapFunc) m.cache[t] = mapping } m.mutex.Unlock() return mapping } // FieldMap returns the mapper's mapping of field names to reflect values. Panics // if v's Kind is not Struct, or v is not Indirectable to a struct kind. func (m *Mapper) FieldMap(v reflect.Value) map[string]reflect.Value { v = reflect.Indirect(v) mustBe(v, reflect.Struct) r := map[string]reflect.Value{} tm := m.TypeMap(v.Type()) for tagName, fi := range tm.Names { r[tagName] = FieldByIndexes(v, fi.Index) } return r } // FieldByName returns a field by its mapped name as a reflect.Value. // Panics if v's Kind is not Struct or v is not Indirectable to a struct Kind. // Returns zero Value if the name is not found. func (m *Mapper) FieldByName(v reflect.Value, name string) reflect.Value { v = reflect.Indirect(v) mustBe(v, reflect.Struct) tm := m.TypeMap(v.Type()) fi, ok := tm.Names[name] if !ok { return v } return FieldByIndexes(v, fi.Index) } // FieldsByName returns a slice of values corresponding to the slice of names // for the value. Panics if v's Kind is not Struct or v is not Indirectable // to a struct Kind. Returns zero Value for each name not found. 
func (m *Mapper) FieldsByName(v reflect.Value, names []string) []reflect.Value { v = reflect.Indirect(v) mustBe(v, reflect.Struct) tm := m.TypeMap(v.Type()) vals := make([]reflect.Value, 0, len(names)) for _, name := range names { fi, ok := tm.Names[name] if !ok { vals = append(vals, *new(reflect.Value)) } else { vals = append(vals, FieldByIndexes(v, fi.Index)) } } return vals } // TraversalsByName returns a slice of int slices which represent the struct // traversals for each mapped name. Panics if t is not a struct or Indirectable // to a struct. Returns empty int slice for each name not found. func (m *Mapper) TraversalsByName(t reflect.Type, names []string) [][]int { r := make([][]int, 0, len(names)) m.TraversalsByNameFunc(t, names, func(_ int, i []int) error { if i == nil { r = append(r, []int{}) } else { r = append(r, i) } return nil }) return r } // TraversalsByNameFunc traverses the mapped names and calls fn with the index of // each name and the struct traversal represented by that name. Panics if t is not // a struct or Indirectable to a struct. Returns the first error returned by fn or nil. func (m *Mapper) TraversalsByNameFunc(t reflect.Type, names []string, fn func(int, []int) error) error { t = Deref(t) mustBe(t, reflect.Struct) tm := m.TypeMap(t) for i, name := range names { fi, ok := tm.Names[name] if !ok { if err := fn(i, nil); err != nil { return err } } else { if err := fn(i, fi.Index); err != nil { return err } } } return nil } // FieldByIndexes returns a value for the field given by the struct traversal // for the given value. func FieldByIndexes(v reflect.Value, indexes []int) reflect.Value { for _, i := range indexes { v = reflect.Indirect(v).Field(i) // if this is a pointer and it's nil, allocate a new value and set it if v.Kind() == reflect.Ptr && v.IsNil() { alloc := reflect.New(Deref(v.Type())) v.Set(alloc) } if v.Kind() == reflect.Map && v.IsNil() { v.Set(reflect.MakeMap(v.Type())) } } return v } // FieldByIndexesReadOnly returns a value for a particular struct traversal, // but is not concerned with allocating nil pointers because the value is // going to be used for reading and not setting. func FieldByIndexesReadOnly(v reflect.Value, indexes []int) reflect.Value { for _, i := range indexes { v = reflect.Indirect(v).Field(i) } return v } // Deref is Indirect for reflect.Types func Deref(t reflect.Type) reflect.Type { if t.Kind() == reflect.Ptr { t = t.Elem() } return t } // -- helpers & utilities -- type kinder interface { Kind() reflect.Kind } // mustBe checks a value against a kind, panicing with a reflect.ValueError // if the kind isn't that which is required. func mustBe(v kinder, expected reflect.Kind) { if k := v.Kind(); k != expected { panic(&reflect.ValueError{Method: methodName(), Kind: k}) } } // methodName returns the caller of the function calling methodName func methodName() string { pc, _, _, _ := runtime.Caller(2) f := runtime.FuncForPC(pc) if f == nil { return "unknown method" } return f.Name() } type typeQueue struct { t reflect.Type fi *FieldInfo pp string // Parent path } // A copying append that creates a new slice each time. func apnd(is []int, i int) []int { x := make([]int, len(is)+1) copy(x, is) x[len(x)-1] = i return x } type mapf func(string) string // parseName parses the tag and the target name for the given field using // the tagName (eg 'json' for `json:"foo"` tags), mapFunc for mapping the // field's name to a target name, and tagMapFunc for mapping the tag to // a target name. 
func parseName(field reflect.StructField, tagName string, mapFunc, tagMapFunc mapf) (tag, fieldName string) { // first, set the fieldName to the field's name fieldName = field.Name // if a mapFunc is set, use that to override the fieldName if mapFunc != nil { fieldName = mapFunc(fieldName) } // if there's no tag to look for, return the field name if tagName == "" { return "", fieldName } // if this tag is not set using the normal convention in the tag, // then return the fieldname.. this check is done because according // to the reflect documentation: // If the tag does not have the conventional format, // the value returned by Get is unspecified. // which doesn't sound great. if !strings.Contains(string(field.Tag), tagName+":") { return "", fieldName } // at this point we're fairly sure that we have a tag, so lets pull it out tag = field.Tag.Get(tagName) // if we have a mapper function, call it on the whole tag // XXX: this is a change from the old version, which pulled out the name // before the tagMapFunc could be run, but I think this is the right way if tagMapFunc != nil { tag = tagMapFunc(tag) } // finally, split the options from the name parts := strings.Split(tag, ",") fieldName = parts[0] return tag, fieldName } // parseOptions parses options out of a tag string, skipping the name func parseOptions(tag string) map[string]string { parts := strings.Split(tag, ",") options := make(map[string]string, len(parts)) if len(parts) > 1 { for _, opt := range parts[1:] { // short circuit potentially expensive split op if strings.Contains(opt, "=") { kv := strings.Split(opt, "=") options[kv[0]] = kv[1] continue } options[opt] = "" } } return options } // getMapping returns a mapping for the t type, using the tagName, mapFunc and // tagMapFunc to determine the canonical names of fields. func getMapping(t reflect.Type, tagName string, mapFunc, tagMapFunc mapf) *StructMap { m := []*FieldInfo{} root := &FieldInfo{} queue := []typeQueue{} queue = append(queue, typeQueue{Deref(t), root, ""}) QueueLoop: for len(queue) != 0 { // pop the first item off of the queue tq := queue[0] queue = queue[1:] // ignore recursive field for p := tq.fi.Parent; p != nil; p = p.Parent { if tq.fi.Field.Type == p.Field.Type { continue QueueLoop } } nChildren := 0 if tq.t.Kind() == reflect.Struct { nChildren = tq.t.NumField() } tq.fi.Children = make([]*FieldInfo, nChildren) // iterate through all of its fields for fieldPos := 0; fieldPos < nChildren; fieldPos++ { f := tq.t.Field(fieldPos) // parse the tag and the target name using the mapping options for this field tag, name := parseName(f, tagName, mapFunc, tagMapFunc) // if the name is "-", disabled via a tag, skip it if name == "-" { continue } fi := FieldInfo{ Field: f, Name: name, Zero: reflect.New(f.Type).Elem(), Options: parseOptions(tag), } // if the path is empty this path is just the name if tq.pp == "" { fi.Path = fi.Name } else { fi.Path = tq.pp + "." 
+ fi.Name } // skip unexported fields if len(f.PkgPath) != 0 && !f.Anonymous { continue } // bfs search of anonymous embedded structs if f.Anonymous { pp := tq.pp if tag != "" { pp = fi.Path } fi.Embedded = true fi.Index = apnd(tq.fi.Index, fieldPos) nChildren := 0 ft := Deref(f.Type) if ft.Kind() == reflect.Struct { nChildren = ft.NumField() } fi.Children = make([]*FieldInfo, nChildren) queue = append(queue, typeQueue{Deref(f.Type), &fi, pp}) } else if fi.Zero.Kind() == reflect.Struct || (fi.Zero.Kind() == reflect.Ptr && fi.Zero.Type().Elem().Kind() == reflect.Struct) { fi.Index = apnd(tq.fi.Index, fieldPos) fi.Children = make([]*FieldInfo, Deref(f.Type).NumField()) queue = append(queue, typeQueue{Deref(f.Type), &fi, fi.Path}) } fi.Index = apnd(tq.fi.Index, fieldPos) fi.Parent = tq.fi tq.fi.Children[fieldPos] = &fi m = append(m, &fi) } } flds := &StructMap{Index: m, Tree: root, Paths: map[string]*FieldInfo{}, Names: map[string]*FieldInfo{}} for _, fi := range flds.Index { // check if nothing has already been pushed with the same path // sometimes you can choose to override a type using embedded struct fld, ok := flds.Paths[fi.Path] if !ok || fld.Embedded { flds.Paths[fi.Path] = fi if fi.Name != "" && !fi.Embedded { flds.Names[fi.Path] = fi } } } return flds } dependencies/pkg/mod/github.com/jmoiron/sqlx@v1.4.0/sqlx.go0000644000000000000000000007544615024302467022455 0ustar rootrootpackage sqlx import ( "database/sql" "database/sql/driver" "errors" "fmt" "io/ioutil" "path/filepath" "reflect" "strings" "sync" "github.com/jmoiron/sqlx/reflectx" ) // Although the NameMapper is convenient, in practice it should not // be relied on except for application code. If you are writing a library // that uses sqlx, you should be aware that the name mappings you expect // can be overridden by your user's application. // NameMapper is used to map column names to struct field names. By default, // it uses strings.ToLower to lowercase struct field names. It can be set // to whatever you want, but it is encouraged to be set before sqlx is used // as name-to-field mappings are cached after first use on a type. var NameMapper = strings.ToLower var origMapper = reflect.ValueOf(NameMapper) // Rather than creating on init, this is created when necessary so that // importers have time to customize the NameMapper. var mpr *reflectx.Mapper // mprMu protects mpr. var mprMu sync.Mutex // mapper returns a valid mapper using the configured NameMapper func. func mapper() *reflectx.Mapper { mprMu.Lock() defer mprMu.Unlock() if mpr == nil { mpr = reflectx.NewMapperFunc("db", NameMapper) } else if origMapper != reflect.ValueOf(NameMapper) { // if NameMapper has changed, create a new mapper mpr = reflectx.NewMapperFunc("db", NameMapper) origMapper = reflect.ValueOf(NameMapper) } return mpr } // isScannable takes the reflect.Type and the actual dest value and returns // whether or not it's Scannable. 
Something is scannable if: // - it is not a struct // - it implements sql.Scanner // - it has no exported fields func isScannable(t reflect.Type) bool { if reflect.PtrTo(t).Implements(_scannerInterface) { return true } if t.Kind() != reflect.Struct { return true } // it's not important that we use the right mapper for this particular object, // we're only concerned on how many exported fields this struct has return len(mapper().TypeMap(t).Index) == 0 } // ColScanner is an interface used by MapScan and SliceScan type ColScanner interface { Columns() ([]string, error) Scan(dest ...interface{}) error Err() error } // Queryer is an interface used by Get and Select type Queryer interface { Query(query string, args ...interface{}) (*sql.Rows, error) Queryx(query string, args ...interface{}) (*Rows, error) QueryRowx(query string, args ...interface{}) *Row } // Execer is an interface used by MustExec and LoadFile type Execer interface { Exec(query string, args ...interface{}) (sql.Result, error) } // Binder is an interface for something which can bind queries (Tx, DB) type binder interface { DriverName() string Rebind(string) string BindNamed(string, interface{}) (string, []interface{}, error) } // Ext is a union interface which can bind, query, and exec, used by // NamedQuery and NamedExec. type Ext interface { binder Queryer Execer } // Preparer is an interface used by Preparex. type Preparer interface { Prepare(query string) (*sql.Stmt, error) } // determine if any of our extensions are unsafe func isUnsafe(i interface{}) bool { switch v := i.(type) { case Row: return v.unsafe case *Row: return v.unsafe case Rows: return v.unsafe case *Rows: return v.unsafe case NamedStmt: return v.Stmt.unsafe case *NamedStmt: return v.Stmt.unsafe case Stmt: return v.unsafe case *Stmt: return v.unsafe case qStmt: return v.unsafe case *qStmt: return v.unsafe case DB: return v.unsafe case *DB: return v.unsafe case Tx: return v.unsafe case *Tx: return v.unsafe case sql.Rows, *sql.Rows: return false default: return false } } func mapperFor(i interface{}) *reflectx.Mapper { switch i := i.(type) { case DB: return i.Mapper case *DB: return i.Mapper case Tx: return i.Mapper case *Tx: return i.Mapper default: return mapper() } } var _scannerInterface = reflect.TypeOf((*sql.Scanner)(nil)).Elem() //lint:ignore U1000 ignoring this for now var _valuerInterface = reflect.TypeOf((*driver.Valuer)(nil)).Elem() // Row is a reimplementation of sql.Row in order to gain access to the underlying // sql.Rows.Columns() data, necessary for StructScan. type Row struct { err error unsafe bool rows *sql.Rows Mapper *reflectx.Mapper } // Scan is a fixed implementation of sql.Row.Scan, which does not discard the // underlying error from the internal rows object if it exists. func (r *Row) Scan(dest ...interface{}) error { if r.err != nil { return r.err } // TODO(bradfitz): for now we need to defensively clone all // []byte that the driver returned (not permitting // *RawBytes in Rows.Scan), since we're about to close // the Rows in our defer, when we return from this function. // the contract with the driver.Next(...) interface is that it // can return slices into read-only temporary memory that's // only valid until the next Scan/Close. But the TODO is that // for a lot of drivers, this copy will be unnecessary. We // should provide an optional interface for drivers to // implement to say, "don't worry, the []bytes that I return // from Next will not be modified again." 
(for instance, if // they were obtained from the network anyway) But for now we // don't care. defer r.rows.Close() for _, dp := range dest { if _, ok := dp.(*sql.RawBytes); ok { return errors.New("sql: RawBytes isn't allowed on Row.Scan") } } if !r.rows.Next() { if err := r.rows.Err(); err != nil { return err } return sql.ErrNoRows } err := r.rows.Scan(dest...) if err != nil { return err } // Make sure the query can be processed to completion with no errors. if err := r.rows.Close(); err != nil { return err } return nil } // Columns returns the underlying sql.Rows.Columns(), or the deferred error usually // returned by Row.Scan() func (r *Row) Columns() ([]string, error) { if r.err != nil { return []string{}, r.err } return r.rows.Columns() } // ColumnTypes returns the underlying sql.Rows.ColumnTypes(), or the deferred error func (r *Row) ColumnTypes() ([]*sql.ColumnType, error) { if r.err != nil { return []*sql.ColumnType{}, r.err } return r.rows.ColumnTypes() } // Err returns the error encountered while scanning. func (r *Row) Err() error { return r.err } // DB is a wrapper around sql.DB which keeps track of the driverName upon Open, // used mostly to automatically bind named queries using the right bindvars. type DB struct { *sql.DB driverName string unsafe bool Mapper *reflectx.Mapper } // NewDb returns a new sqlx DB wrapper for a pre-existing *sql.DB. The // driverName of the original database is required for named query support. // //lint:ignore ST1003 changing this would break the package interface. func NewDb(db *sql.DB, driverName string) *DB { return &DB{DB: db, driverName: driverName, Mapper: mapper()} } // DriverName returns the driverName passed to the Open function for this DB. func (db *DB) DriverName() string { return db.driverName } // Open is the same as sql.Open, but returns an *sqlx.DB instead. func Open(driverName, dataSourceName string) (*DB, error) { db, err := sql.Open(driverName, dataSourceName) if err != nil { return nil, err } return &DB{DB: db, driverName: driverName, Mapper: mapper()}, err } // MustOpen is the same as sql.Open, but returns an *sqlx.DB instead and panics on error. func MustOpen(driverName, dataSourceName string) *DB { db, err := Open(driverName, dataSourceName) if err != nil { panic(err) } return db } // MapperFunc sets a new mapper for this db using the default sqlx struct tag // and the provided mapper function. func (db *DB) MapperFunc(mf func(string) string) { db.Mapper = reflectx.NewMapperFunc("db", mf) } // Rebind transforms a query from QUESTION to the DB driver's bindvar type. func (db *DB) Rebind(query string) string { return Rebind(BindType(db.driverName), query) } // Unsafe returns a version of DB which will silently succeed to scan when // columns in the SQL result have no fields in the destination struct. // sqlx.Stmt and sqlx.Tx which are created from this DB will inherit its // safety behavior. func (db *DB) Unsafe() *DB { return &DB{DB: db.DB, driverName: db.driverName, unsafe: true, Mapper: db.Mapper} } // BindNamed binds a query using the DB driver's bindvar type. func (db *DB) BindNamed(query string, arg interface{}) (string, []interface{}, error) { return bindNamedMapper(BindType(db.driverName), query, arg, db.Mapper) } // NamedQuery using this DB. // Any named placeholder parameters are replaced with fields from arg. func (db *DB) NamedQuery(query string, arg interface{}) (*Rows, error) { return NamedQuery(db, query, arg) } // NamedExec using this DB. // Any named placeholder parameters are replaced with fields from arg. 
func (db *DB) NamedExec(query string, arg interface{}) (sql.Result, error) { return NamedExec(db, query, arg) } // Select using this DB. // Any placeholder parameters are replaced with supplied args. func (db *DB) Select(dest interface{}, query string, args ...interface{}) error { return Select(db, dest, query, args...) } // Get using this DB. // Any placeholder parameters are replaced with supplied args. // An error is returned if the result set is empty. func (db *DB) Get(dest interface{}, query string, args ...interface{}) error { return Get(db, dest, query, args...) } // MustBegin starts a transaction, and panics on error. Returns an *sqlx.Tx instead // of an *sql.Tx. func (db *DB) MustBegin() *Tx { tx, err := db.Beginx() if err != nil { panic(err) } return tx } // Beginx begins a transaction and returns an *sqlx.Tx instead of an *sql.Tx. func (db *DB) Beginx() (*Tx, error) { tx, err := db.DB.Begin() if err != nil { return nil, err } return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err } // Queryx queries the database and returns an *sqlx.Rows. // Any placeholder parameters are replaced with supplied args. func (db *DB) Queryx(query string, args ...interface{}) (*Rows, error) { r, err := db.DB.Query(query, args...) if err != nil { return nil, err } return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err } // QueryRowx queries the database and returns an *sqlx.Row. // Any placeholder parameters are replaced with supplied args. func (db *DB) QueryRowx(query string, args ...interface{}) *Row { rows, err := db.DB.Query(query, args...) return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper} } // MustExec (panic) runs MustExec using this database. // Any placeholder parameters are replaced with supplied args. func (db *DB) MustExec(query string, args ...interface{}) sql.Result { return MustExec(db, query, args...) } // Preparex returns an sqlx.Stmt instead of a sql.Stmt func (db *DB) Preparex(query string) (*Stmt, error) { return Preparex(db, query) } // PrepareNamed returns an sqlx.NamedStmt func (db *DB) PrepareNamed(query string) (*NamedStmt, error) { return prepareNamed(db, query) } // Conn is a wrapper around sql.Conn with extra functionality type Conn struct { *sql.Conn driverName string unsafe bool Mapper *reflectx.Mapper } // Tx is an sqlx wrapper around sql.Tx with extra functionality type Tx struct { *sql.Tx driverName string unsafe bool Mapper *reflectx.Mapper } // DriverName returns the driverName used by the DB which began this transaction. func (tx *Tx) DriverName() string { return tx.driverName } // Rebind a query within a transaction's bindvar type. func (tx *Tx) Rebind(query string) string { return Rebind(BindType(tx.driverName), query) } // Unsafe returns a version of Tx which will silently succeed to scan when // columns in the SQL result have no fields in the destination struct. func (tx *Tx) Unsafe() *Tx { return &Tx{Tx: tx.Tx, driverName: tx.driverName, unsafe: true, Mapper: tx.Mapper} } // BindNamed binds a query within a transaction's bindvar type. func (tx *Tx) BindNamed(query string, arg interface{}) (string, []interface{}, error) { return bindNamedMapper(BindType(tx.driverName), query, arg, tx.Mapper) } // NamedQuery within a transaction. // Any named placeholder parameters are replaced with fields from arg. func (tx *Tx) NamedQuery(query string, arg interface{}) (*Rows, error) { return NamedQuery(tx, query, arg) } // NamedExec a named query within a transaction. 
// Any named placeholder parameters are replaced with fields from arg. func (tx *Tx) NamedExec(query string, arg interface{}) (sql.Result, error) { return NamedExec(tx, query, arg) } // Select within a transaction. // Any placeholder parameters are replaced with supplied args. func (tx *Tx) Select(dest interface{}, query string, args ...interface{}) error { return Select(tx, dest, query, args...) } // Queryx within a transaction. // Any placeholder parameters are replaced with supplied args. func (tx *Tx) Queryx(query string, args ...interface{}) (*Rows, error) { r, err := tx.Tx.Query(query, args...) if err != nil { return nil, err } return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err } // QueryRowx within a transaction. // Any placeholder parameters are replaced with supplied args. func (tx *Tx) QueryRowx(query string, args ...interface{}) *Row { rows, err := tx.Tx.Query(query, args...) return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper} } // Get within a transaction. // Any placeholder parameters are replaced with supplied args. // An error is returned if the result set is empty. func (tx *Tx) Get(dest interface{}, query string, args ...interface{}) error { return Get(tx, dest, query, args...) } // MustExec runs MustExec within a transaction. // Any placeholder parameters are replaced with supplied args. func (tx *Tx) MustExec(query string, args ...interface{}) sql.Result { return MustExec(tx, query, args...) } // Preparex a statement within a transaction. func (tx *Tx) Preparex(query string) (*Stmt, error) { return Preparex(tx, query) } // Stmtx returns a version of the prepared statement which runs within a transaction. Provided // stmt can be either *sql.Stmt or *sqlx.Stmt. func (tx *Tx) Stmtx(stmt interface{}) *Stmt { var s *sql.Stmt switch v := stmt.(type) { case Stmt: s = v.Stmt case *Stmt: s = v.Stmt case *sql.Stmt: s = v default: panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type())) } return &Stmt{Stmt: tx.Stmt(s), Mapper: tx.Mapper} } // NamedStmt returns a version of the prepared statement which runs within a transaction. func (tx *Tx) NamedStmt(stmt *NamedStmt) *NamedStmt { return &NamedStmt{ QueryString: stmt.QueryString, Params: stmt.Params, Stmt: tx.Stmtx(stmt.Stmt), } } // PrepareNamed returns an sqlx.NamedStmt func (tx *Tx) PrepareNamed(query string) (*NamedStmt, error) { return prepareNamed(tx, query) } // Stmt is an sqlx wrapper around sql.Stmt with extra functionality type Stmt struct { *sql.Stmt unsafe bool Mapper *reflectx.Mapper } // Unsafe returns a version of Stmt which will silently succeed to scan when // columns in the SQL result have no fields in the destination struct. func (s *Stmt) Unsafe() *Stmt { return &Stmt{Stmt: s.Stmt, unsafe: true, Mapper: s.Mapper} } // Select using the prepared statement. // Any placeholder parameters are replaced with supplied args. func (s *Stmt) Select(dest interface{}, args ...interface{}) error { return Select(&qStmt{s}, dest, "", args...) } // Get using the prepared statement. // Any placeholder parameters are replaced with supplied args. // An error is returned if the result set is empty. func (s *Stmt) Get(dest interface{}, args ...interface{}) error { return Get(&qStmt{s}, dest, "", args...) } // MustExec (panic) using this statement. Note that the query portion of the error // output will be blank, as Stmt does not expose its query. // Any placeholder parameters are replaced with supplied args. 
func (s *Stmt) MustExec(args ...interface{}) sql.Result { return MustExec(&qStmt{s}, "", args...) } // QueryRowx using this statement. // Any placeholder parameters are replaced with supplied args. func (s *Stmt) QueryRowx(args ...interface{}) *Row { qs := &qStmt{s} return qs.QueryRowx("", args...) } // Queryx using this statement. // Any placeholder parameters are replaced with supplied args. func (s *Stmt) Queryx(args ...interface{}) (*Rows, error) { qs := &qStmt{s} return qs.Queryx("", args...) } // qStmt is an unexposed wrapper which lets you use a Stmt as a Queryer & Execer by // implementing those interfaces and ignoring the `query` argument. type qStmt struct{ *Stmt } func (q *qStmt) Query(query string, args ...interface{}) (*sql.Rows, error) { return q.Stmt.Query(args...) } func (q *qStmt) Queryx(query string, args ...interface{}) (*Rows, error) { r, err := q.Stmt.Query(args...) if err != nil { return nil, err } return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err } func (q *qStmt) QueryRowx(query string, args ...interface{}) *Row { rows, err := q.Stmt.Query(args...) return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper} } func (q *qStmt) Exec(query string, args ...interface{}) (sql.Result, error) { return q.Stmt.Exec(args...) } // Rows is a wrapper around sql.Rows which caches costly reflect operations // during a looped StructScan type Rows struct { *sql.Rows unsafe bool Mapper *reflectx.Mapper // these fields cache memory use for a rows during iteration w/ structScan started bool fields [][]int values []interface{} } // SliceScan using this Rows. func (r *Rows) SliceScan() ([]interface{}, error) { return SliceScan(r) } // MapScan using this Rows. func (r *Rows) MapScan(dest map[string]interface{}) error { return MapScan(r, dest) } // StructScan is like sql.Rows.Scan, but scans a single Row into a single Struct. // Use this and iterate over Rows manually when the memory load of Select() might be // prohibitive. *Rows.StructScan caches the reflect work of matching up column // positions to fields to avoid that overhead per scan, which means it is not safe // to run StructScan on the same Rows instance with different struct types. func (r *Rows) StructScan(dest interface{}) error { v := reflect.ValueOf(dest) if v.Kind() != reflect.Ptr { return errors.New("must pass a pointer, not a value, to StructScan destination") } v = v.Elem() if !r.started { columns, err := r.Columns() if err != nil { return err } m := r.Mapper r.fields = m.TraversalsByName(v.Type(), columns) // if we are not unsafe and are missing fields, return an error if f, err := missingFields(r.fields); err != nil && !r.unsafe { return fmt.Errorf("missing destination name %s in %T", columns[f], dest) } r.values = make([]interface{}, len(columns)) r.started = true } err := fieldsByTraversal(v, r.fields, r.values, true) if err != nil { return err } // scan into the struct field pointers and append to our results err = r.Scan(r.values...) if err != nil { return err } return r.Err() } // Connect to a database and verify with a ping. func Connect(driverName, dataSourceName string) (*DB, error) { db, err := Open(driverName, dataSourceName) if err != nil { return nil, err } err = db.Ping() if err != nil { db.Close() return nil, err } return db, nil } // MustConnect connects to a database and panics on error. 
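// [Editor's addition — illustrative sketch, not part of the upstream sqlx source.]
// Caller-side comparison of Connect and MustConnect; the driver name and DSN are
// examples only and assume the corresponding driver package has been imported.
//
//	db, err := sqlx.Connect("sqlite3", ":memory:")
//	if err != nil {
//		log.Fatal(err)
//	}
//	// MustConnect does the same, but panics instead of returning the error:
//	db = sqlx.MustConnect("sqlite3", ":memory:")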
func MustConnect(driverName, dataSourceName string) *DB { db, err := Connect(driverName, dataSourceName) if err != nil { panic(err) } return db } // Preparex prepares a statement. func Preparex(p Preparer, query string) (*Stmt, error) { s, err := p.Prepare(query) if err != nil { return nil, err } return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err } // Select executes a query using the provided Queryer, and StructScans each row // into dest, which must be a slice. If the slice elements are scannable, then // the result set must have only one column. Otherwise, StructScan is used. // The *sql.Rows are closed automatically. // Any placeholder parameters are replaced with supplied args. func Select(q Queryer, dest interface{}, query string, args ...interface{}) error { rows, err := q.Queryx(query, args...) if err != nil { return err } // if something happens here, we want to make sure the rows are Closed defer rows.Close() return scanAll(rows, dest, false) } // Get does a QueryRow using the provided Queryer, and scans the resulting row // to dest. If dest is scannable, the result must only have one column. Otherwise, // StructScan is used. Get will return sql.ErrNoRows like row.Scan would. // Any placeholder parameters are replaced with supplied args. // An error is returned if the result set is empty. func Get(q Queryer, dest interface{}, query string, args ...interface{}) error { r := q.QueryRowx(query, args...) return r.scanAny(dest, false) } // LoadFile exec's every statement in a file (as a single call to Exec). // LoadFile may return a nil *sql.Result if errors are encountered locating or // reading the file at path. LoadFile reads the entire file into memory, so it // is not suitable for loading large data dumps, but can be useful for initializing // schemas or loading indexes. // // FIXME: this does not really work with multi-statement files for mattn/go-sqlite3 // or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting // this by requiring something with DriverName() and then attempting to split the // queries will be difficult to get right, and its current driver-specific behavior // is deemed at least not complex in its incorrectness. func LoadFile(e Execer, path string) (*sql.Result, error) { realpath, err := filepath.Abs(path) if err != nil { return nil, err } contents, err := ioutil.ReadFile(realpath) if err != nil { return nil, err } res, err := e.Exec(string(contents)) return &res, err } // MustExec execs the query using e and panics if there was an error. // Any placeholder parameters are replaced with supplied args. func MustExec(e Execer, query string, args ...interface{}) sql.Result { res, err := e.Exec(query, args...) if err != nil { panic(err) } return res } // SliceScan using this Rows. func (r *Row) SliceScan() ([]interface{}, error) { return SliceScan(r) } // MapScan using this Rows. 
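// [Editor's addition — illustrative sketch, not part of the upstream sqlx source.]
// MapScan and SliceScan on a Row are handy when the column set is not known ahead
// of time; the "place" table below mirrors the one used in this package's tests.
//
//	m := map[string]interface{}{}
//	err := db.QueryRowx("SELECT country, city, telcode FROM place LIMIT 1").MapScan(m)
//	// m["country"], m["city"], m["telcode"] now hold the column values
//
//	vals, err := db.QueryRowx("SELECT country, telcode FROM place LIMIT 1").SliceScan()
//	// vals is a []interface{} in column order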
func (r *Row) MapScan(dest map[string]interface{}) error { return MapScan(r, dest) } func (r *Row) scanAny(dest interface{}, structOnly bool) error { if r.err != nil { return r.err } if r.rows == nil { r.err = sql.ErrNoRows return r.err } defer r.rows.Close() v := reflect.ValueOf(dest) if v.Kind() != reflect.Ptr { return errors.New("must pass a pointer, not a value, to StructScan destination") } if v.IsNil() { return errors.New("nil pointer passed to StructScan destination") } base := reflectx.Deref(v.Type()) scannable := isScannable(base) if structOnly && scannable { return structOnlyError(base) } columns, err := r.Columns() if err != nil { return err } if scannable && len(columns) > 1 { return fmt.Errorf("scannable dest type %s with >1 columns (%d) in result", base.Kind(), len(columns)) } if scannable { return r.Scan(dest) } m := r.Mapper fields := m.TraversalsByName(v.Type(), columns) // if we are not unsafe and are missing fields, return an error if f, err := missingFields(fields); err != nil && !r.unsafe { return fmt.Errorf("missing destination name %s in %T", columns[f], dest) } values := make([]interface{}, len(columns)) err = fieldsByTraversal(v, fields, values, true) if err != nil { return err } // scan into the struct field pointers and append to our results return r.Scan(values...) } // StructScan a single Row into dest. func (r *Row) StructScan(dest interface{}) error { return r.scanAny(dest, true) } // SliceScan a row, returning a []interface{} with values similar to MapScan. // This function is primarily intended for use where the number of columns // is not known. Because you can pass an []interface{} directly to Scan, // it's recommended that you do that as it will not have to allocate new // slices per row. func SliceScan(r ColScanner) ([]interface{}, error) { // ignore r.started, since we needn't use reflect for anything. columns, err := r.Columns() if err != nil { return []interface{}{}, err } values := make([]interface{}, len(columns)) for i := range values { values[i] = new(interface{}) } err = r.Scan(values...) if err != nil { return values, err } for i := range columns { values[i] = *(values[i].(*interface{})) } return values, r.Err() } // MapScan scans a single Row into the dest map[string]interface{}. // Use this to get results for SQL that might not be under your control // (for instance, if you're building an interface for an SQL server that // executes SQL from input). Please do not use this as a primary interface! // This will modify the map sent to it in place, so reuse the same map with // care. Columns which occur more than once in the result will overwrite // each other! func MapScan(r ColScanner, dest map[string]interface{}) error { // ignore r.started, since we needn't use reflect for anything. columns, err := r.Columns() if err != nil { return err } values := make([]interface{}, len(columns)) for i := range values { values[i] = new(interface{}) } err = r.Scan(values...) 
if err != nil { return err } for i, column := range columns { dest[column] = *(values[i].(*interface{})) } return r.Err() } type rowsi interface { Close() error Columns() ([]string, error) Err() error Next() bool Scan(...interface{}) error } // structOnlyError returns an error appropriate for type when a non-scannable // struct is expected but something else is given func structOnlyError(t reflect.Type) error { isStruct := t.Kind() == reflect.Struct isScanner := reflect.PtrTo(t).Implements(_scannerInterface) if !isStruct { return fmt.Errorf("expected %s but got %s", reflect.Struct, t.Kind()) } if isScanner { return fmt.Errorf("structscan expects a struct dest but the provided struct type %s implements scanner", t.Name()) } return fmt.Errorf("expected a struct, but struct %s has no exported fields", t.Name()) } // scanAll scans all rows into a destination, which must be a slice of any // type. It resets the slice length to zero before appending each element to // the slice. If the destination slice type is a Struct, then StructScan will // be used on each row. If the destination is some other kind of base type, // then each row must only have one column which can scan into that type. This // allows you to do something like: // // rows, _ := db.Query("select id from people;") // var ids []int // scanAll(rows, &ids, false) // // and ids will be a list of the id results. I realize that this is a desirable // interface to expose to users, but for now it will only be exposed via changes // to `Get` and `Select`. The reason that this has been implemented like this is // this is the only way to not duplicate reflect work in the new API while // maintaining backwards compatibility. func scanAll(rows rowsi, dest interface{}, structOnly bool) error { var v, vp reflect.Value value := reflect.ValueOf(dest) // json.Unmarshal returns errors for these if value.Kind() != reflect.Ptr { return errors.New("must pass a pointer, not a value, to StructScan destination") } if value.IsNil() { return errors.New("nil pointer passed to StructScan destination") } direct := reflect.Indirect(value) slice, err := baseType(value.Type(), reflect.Slice) if err != nil { return err } direct.SetLen(0) isPtr := slice.Elem().Kind() == reflect.Ptr base := reflectx.Deref(slice.Elem()) scannable := isScannable(base) if structOnly && scannable { return structOnlyError(base) } columns, err := rows.Columns() if err != nil { return err } // if it's a base type make sure it only has 1 column; if not return an error if scannable && len(columns) > 1 { return fmt.Errorf("non-struct dest type %s with >1 columns (%d)", base.Kind(), len(columns)) } if !scannable { var values []interface{} var m *reflectx.Mapper switch rows := rows.(type) { case *Rows: m = rows.Mapper default: m = mapper() } fields := m.TraversalsByName(base, columns) // if we are not unsafe and are missing fields, return an error if f, err := missingFields(fields); err != nil && !isUnsafe(rows) { return fmt.Errorf("missing destination name %s in %T", columns[f], dest) } values = make([]interface{}, len(columns)) for rows.Next() { // create a new struct type (which returns PtrTo) and indirect it vp = reflect.New(base) v = reflect.Indirect(vp) err = fieldsByTraversal(v, fields, values, true) if err != nil { return err } // scan into the struct field pointers and append to our results err = rows.Scan(values...) 
if err != nil { return err } if isPtr { direct.Set(reflect.Append(direct, vp)) } else { direct.Set(reflect.Append(direct, v)) } } } else { for rows.Next() { vp = reflect.New(base) err = rows.Scan(vp.Interface()) if err != nil { return err } // append if isPtr { direct.Set(reflect.Append(direct, vp)) } else { direct.Set(reflect.Append(direct, reflect.Indirect(vp))) } } } return rows.Err() } // FIXME: StructScan was the very first bit of API in sqlx, and now unfortunately // it doesn't really feel like it's named properly. There is an incongruency // between this and the way that StructScan (which might better be ScanStruct // anyway) works on a rows object. // StructScan all rows from an sql.Rows or an sqlx.Rows into the dest slice. // StructScan will scan in the entire rows result, so if you do not want to // allocate structs for the entire result, use Queryx and see sqlx.Rows.StructScan. // If rows is sqlx.Rows, it will use its mapper, otherwise it will use the default. func StructScan(rows rowsi, dest interface{}) error { return scanAll(rows, dest, true) } // reflect helpers func baseType(t reflect.Type, expected reflect.Kind) (reflect.Type, error) { t = reflectx.Deref(t) if t.Kind() != expected { return nil, fmt.Errorf("expected %s but got %s", expected, t.Kind()) } return t, nil } // fieldsByName fills a values interface with fields from the passed value based // on the traversals in int. If ptrs is true, return addresses instead of values. // We write this instead of using FieldsByName to save allocations and map lookups // when iterating over many rows. Empty traversals will get an interface pointer. // Because of the necessity of requesting ptrs or values, it's considered a bit too // specialized for inclusion in reflectx itself. func fieldsByTraversal(v reflect.Value, traversals [][]int, values []interface{}, ptrs bool) error { v = reflect.Indirect(v) if v.Kind() != reflect.Struct { return errors.New("argument not a struct") } for i, traversal := range traversals { if len(traversal) == 0 { values[i] = new(interface{}) continue } f := reflectx.FieldByIndexes(v, traversal) if ptrs { values[i] = f.Addr().Interface() } else { values[i] = f.Interface() } } return nil } func missingFields(transversals [][]int) (field int, err error) { for i, t := range transversals { if len(t) == 0 { return i, errors.New("missing field") } } return 0, nil } dependencies/pkg/mod/github.com/jmoiron/sqlx@v1.4.0/types/0000755000000000000000000000000015024302467022263 5ustar rootrootdependencies/pkg/mod/github.com/jmoiron/sqlx@v1.4.0/types/doc.go0000644000000000000000000000030015024302467023350 0ustar rootroot// Package types provides some useful types which implement the `sql.Scanner` // and `driver.Valuer` interfaces, suitable for use as scan and value targets with // database/sql. package types dependencies/pkg/mod/github.com/jmoiron/sqlx@v1.4.0/types/README.md0000644000000000000000000000026615024302467023546 0ustar rootroot# types The types package provides some useful types which implement the `sql.Scanner` and `driver.Valuer` interfaces, suitable for use as scan and value targets with database/sql. dependencies/pkg/mod/github.com/jmoiron/sqlx@v1.4.0/types/types.go0000644000000000000000000001047515024302467023765 0ustar rootrootpackage types import ( "bytes" "compress/gzip" "database/sql/driver" "encoding/json" "errors" "io/ioutil" ) // GzippedText is a []byte which transparently gzips data being submitted to // a database and ungzips data being Scanned from a database. 
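// [Editor's addition — illustrative sketch, not part of the upstream sqlx source.]
// A hypothetical use of GzippedText as a struct field; the "article" table and
// Article struct are assumptions for illustration only.
//
//	type Article struct {
//		ID   int         `db:"id"`
//		Body GzippedText `db:"body"` // written gzip-compressed, read back decompressed
//	}
//	_, err := db.Exec(`INSERT INTO article (id, body) VALUES (?, ?)`, 1, GzippedText("hello, world"))
//	var a Article
//	err = db.Get(&a, `SELECT id, body FROM article WHERE id = ?`, 1)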
type GzippedText []byte // Value implements the driver.Valuer interface, gzipping the raw value of // this GzippedText. func (g GzippedText) Value() (driver.Value, error) { b := make([]byte, 0, len(g)) buf := bytes.NewBuffer(b) w := gzip.NewWriter(buf) w.Write(g) w.Close() return buf.Bytes(), nil } // Scan implements the sql.Scanner interface, ungzipping the value coming off // the wire and storing the raw result in the GzippedText. func (g *GzippedText) Scan(src interface{}) error { var source []byte switch src := src.(type) { case string: source = []byte(src) case []byte: source = src default: //lint:ignore ST1005 changing this could break consumers of this package return errors.New("Incompatible type for GzippedText") } reader, err := gzip.NewReader(bytes.NewReader(source)) if err != nil { return err } defer reader.Close() b, err := ioutil.ReadAll(reader) if err != nil { return err } *g = GzippedText(b) return nil } // JSONText is a json.RawMessage, which is a []byte underneath. // Value() validates the json format in the source, and returns an error if // the json is not valid. Scan does no validation. JSONText additionally // implements `Unmarshal`, which unmarshals the json within to an interface{} type JSONText json.RawMessage var emptyJSON = JSONText("{}") // MarshalJSON returns the *j as the JSON encoding of j. func (j JSONText) MarshalJSON() ([]byte, error) { if len(j) == 0 { return emptyJSON, nil } return j, nil } // UnmarshalJSON sets *j to a copy of data func (j *JSONText) UnmarshalJSON(data []byte) error { if j == nil { return errors.New("JSONText: UnmarshalJSON on nil pointer") } *j = append((*j)[0:0], data...) return nil } // Value returns j as a value. This does a validating unmarshal into another // RawMessage. If j is invalid json, it returns an error. func (j JSONText) Value() (driver.Value, error) { var m json.RawMessage var err = j.Unmarshal(&m) if err != nil { return []byte{}, err } return []byte(j), nil } // Scan stores the src in *j. No validation is done. func (j *JSONText) Scan(src interface{}) error { var source []byte switch t := src.(type) { case string: source = []byte(t) case []byte: if len(t) == 0 { source = emptyJSON } else { source = t } case nil: *j = emptyJSON default: //lint:ignore ST1005 changing this could break consumers of this package return errors.New("Incompatible type for JSONText") } *j = append((*j)[0:0], source...) return nil } // Unmarshal unmarshal's the json in j to v, as in json.Unmarshal. func (j *JSONText) Unmarshal(v interface{}) error { if len(*j) == 0 { *j = emptyJSON } return json.Unmarshal([]byte(*j), v) } // String supports pretty printing for JSONText types. func (j JSONText) String() string { return string(j) } // NullJSONText represents a JSONText that may be null. // NullJSONText implements the scanner interface so // it can be used as a scan destination, similar to NullString. type NullJSONText struct { JSONText Valid bool // Valid is true if JSONText is not NULL } // Scan implements the Scanner interface. func (n *NullJSONText) Scan(value interface{}) error { if value == nil { n.JSONText, n.Valid = emptyJSON, false return nil } n.Valid = true return n.JSONText.Scan(value) } // Value implements the driver Valuer interface. func (n NullJSONText) Value() (driver.Value, error) { if !n.Valid { return nil, nil } return n.JSONText.Value() } // BitBool is an implementation of a bool for the MySQL type BIT(1). // This type allows you to avoid wasting an entire byte for MySQL's boolean type TINYINT. 
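// [Editor's addition — illustrative sketch, not part of the upstream sqlx source.]
// A hypothetical use of BitBool against a MySQL BIT(1) column; the "user" table
// and User struct are assumptions for illustration only.
//
//	type User struct {
//		ID     int     `db:"id"`
//		Active BitBool `db:"active"` // BIT(1) column
//	}
//	_, err := db.Exec(`INSERT INTO user (id, active) VALUES (?, ?)`, 1, BitBool(true))
//	var u User
//	err = db.Get(&u, `SELECT id, active FROM user WHERE id = ?`, 1)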
type BitBool bool // Value implements the driver.Valuer interface, // and turns the BitBool into a bitfield (BIT(1)) for MySQL storage. func (b BitBool) Value() (driver.Value, error) { if b { return []byte{1}, nil } return []byte{0}, nil } // Scan implements the sql.Scanner interface, // and turns the bitfield incoming from MySQL into a BitBool func (b *BitBool) Scan(src interface{}) error { v, ok := src.([]byte) if !ok { return errors.New("bad []byte type assertion") } *b = v[0] == 1 return nil } dependencies/pkg/mod/github.com/jmoiron/sqlx@v1.4.0/types/types_test.go0000644000000000000000000000501415024302467025015 0ustar rootrootpackage types import "testing" func TestGzipText(t *testing.T) { g := GzippedText("Hello, world") v, err := g.Value() if err != nil { t.Errorf("Was not expecting an error") } err = (&g).Scan(v) if err != nil { t.Errorf("Was not expecting an error") } if string(g) != "Hello, world" { t.Errorf("Was expecting the string we sent in (Hello World), got %s", string(g)) } } func TestJSONText(t *testing.T) { j := JSONText(`{"foo": 1, "bar": 2}`) v, err := j.Value() if err != nil { t.Errorf("Was not expecting an error") } err = (&j).Scan(v) if err != nil { t.Errorf("Was not expecting an error") } m := map[string]interface{}{} j.Unmarshal(&m) if m["foo"].(float64) != 1 || m["bar"].(float64) != 2 { t.Errorf("Expected valid json but got some garbage instead? %#v", m) } j = JSONText(`{"foo": 1, invalid, false}`) _, err = j.Value() if err == nil { t.Errorf("Was expecting invalid json to fail!") } j = JSONText("") v, err = j.Value() if err != nil { t.Errorf("Was not expecting an error") } err = (&j).Scan(v) if err != nil { t.Errorf("Was not expecting an error") } j = JSONText(nil) v, err = j.Value() if err != nil { t.Errorf("Was not expecting an error") } err = (&j).Scan(v) if err != nil { t.Errorf("Was not expecting an error") } } func TestNullJSONText(t *testing.T) { j := NullJSONText{} err := j.Scan(`{"foo": 1, "bar": 2}`) if err != nil { t.Errorf("Was not expecting an error") } v, err := j.Value() if err != nil { t.Errorf("Was not expecting an error") } err = (&j).Scan(v) if err != nil { t.Errorf("Was not expecting an error") } m := map[string]interface{}{} j.Unmarshal(&m) if m["foo"].(float64) != 1 || m["bar"].(float64) != 2 { t.Errorf("Expected valid json but got some garbage instead? %#v", m) } j = NullJSONText{} err = j.Scan(nil) if err != nil { t.Errorf("Was not expecting an error") } if j.Valid != false { t.Errorf("Expected valid to be false, but got true") } } func TestBitBool(t *testing.T) { // Test true value var b BitBool = true v, err := b.Value() if err != nil { t.Errorf("Cannot return error") } err = (&b).Scan(v) if err != nil { t.Errorf("Was not expecting an error") } if !b { t.Errorf("Was expecting the bool we sent in (true), got %v", b) } // Test false value b = false v, err = b.Value() if err != nil { t.Errorf("Cannot return error") } err = (&b).Scan(v) if err != nil { t.Errorf("Was not expecting an error") } if b { t.Errorf("Was expecting the bool we sent in (false), got %v", b) } } dependencies/pkg/mod/github.com/jmoiron/sqlx@v1.4.0/sqlx_context_test.go0000644000000000000000000011436615024302467025253 0ustar rootroot//go:build go1.8 // +build go1.8 // The following environment variables, if set, will be used: // // - SQLX_SQLITE_DSN // - SQLX_POSTGRES_DSN // - SQLX_MYSQL_DSN // // Set any of these variables to 'skip' to skip them. Note that for MySQL, // the string '?parseTime=True' will be appended to the DSN if it's not there // already. 
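// [Editor's addition — illustrative sketch, not part of the upstream source.]
// Example DSN values one might export before running these tests; the hosts,
// users, and database names are placeholders, and the formats are the usual ones
// for mattn/go-sqlite3, lib/pq, and go-sql-driver/mysql respectively:
//
//	SQLX_SQLITE_DSN=":memory:"
//	SQLX_POSTGRES_DSN="user=sqlxtest dbname=sqlxtest sslmode=disable"
//	SQLX_MYSQL_DSN="sqlxtest:sqlxtest@tcp(localhost:3306)/sqlxtest"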
package sqlx import ( "context" "database/sql" "encoding/json" "fmt" "log" "strings" "testing" "time" _ "github.com/go-sql-driver/mysql" _ "github.com/lib/pq" _ "github.com/mattn/go-sqlite3" "github.com/jmoiron/sqlx/reflectx" ) func MultiExecContext(ctx context.Context, e ExecerContext, query string) { stmts := strings.Split(query, ";\n") if len(strings.Trim(stmts[len(stmts)-1], " \n\t\r")) == 0 { stmts = stmts[:len(stmts)-1] } for _, s := range stmts { _, err := e.ExecContext(ctx, s) if err != nil { fmt.Println(err, s) } } } func RunWithSchemaContext(ctx context.Context, schema Schema, t *testing.T, test func(ctx context.Context, db *DB, t *testing.T)) { runner := func(ctx context.Context, db *DB, t *testing.T, create, drop, now string) { defer func() { MultiExecContext(ctx, db, drop) }() MultiExecContext(ctx, db, create) test(ctx, db, t) } if TestPostgres { create, drop, now := schema.Postgres() runner(ctx, pgdb, t, create, drop, now) } if TestSqlite { create, drop, now := schema.Sqlite3() runner(ctx, sldb, t, create, drop, now) } if TestMysql { create, drop, now := schema.MySQL() runner(ctx, mysqldb, t, create, drop, now) } } func loadDefaultFixtureContext(ctx context.Context, db *DB, t *testing.T) { tx := db.MustBeginTx(ctx, nil) tx.MustExecContext(ctx, tx.Rebind("INSERT INTO person (first_name, last_name, email) VALUES (?, ?, ?)"), "Jason", "Moiron", "jmoiron@jmoiron.net") tx.MustExecContext(ctx, tx.Rebind("INSERT INTO person (first_name, last_name, email) VALUES (?, ?, ?)"), "John", "Doe", "johndoeDNE@gmail.net") tx.MustExecContext(ctx, tx.Rebind("INSERT INTO place (country, city, telcode) VALUES (?, ?, ?)"), "United States", "New York", "1") tx.MustExecContext(ctx, tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Hong Kong", "852") tx.MustExecContext(ctx, tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Singapore", "65") if db.DriverName() == "mysql" { tx.MustExecContext(ctx, tx.Rebind("INSERT INTO capplace (`COUNTRY`, `TELCODE`) VALUES (?, ?)"), "Sarf Efrica", "27") } else { tx.MustExecContext(ctx, tx.Rebind("INSERT INTO capplace (\"COUNTRY\", \"TELCODE\") VALUES (?, ?)"), "Sarf Efrica", "27") } tx.MustExecContext(ctx, tx.Rebind("INSERT INTO employees (name, id) VALUES (?, ?)"), "Peter", "4444") tx.MustExecContext(ctx, tx.Rebind("INSERT INTO employees (name, id, boss_id) VALUES (?, ?, ?)"), "Joe", "1", "4444") tx.MustExecContext(ctx, tx.Rebind("INSERT INTO employees (name, id, boss_id) VALUES (?, ?, ?)"), "Martin", "2", "4444") tx.Commit() } // Test a new backwards compatible feature, that missing scan destinations // will silently scan into sql.RawText rather than failing/panicing func TestMissingNamesContextContext(t *testing.T) { RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) { loadDefaultFixtureContext(ctx, db, t) type PersonPlus struct { FirstName string `db:"first_name"` LastName string `db:"last_name"` Email string // AddedAt time.Time `db:"added_at"` } // test Select first pps := []PersonPlus{} // pps lacks added_at destination err := db.SelectContext(ctx, &pps, "SELECT * FROM person") if err == nil { t.Error("Expected missing name from Select to fail, but it did not.") } // test Get pp := PersonPlus{} err = db.GetContext(ctx, &pp, "SELECT * FROM person LIMIT 1") if err == nil { t.Error("Expected missing name Get to fail, but it did not.") } // test naked StructScan pps = []PersonPlus{} rows, err := db.QueryContext(ctx, "SELECT * FROM person LIMIT 1") if err != nil { t.Fatal(err) 
} rows.Next() err = StructScan(rows, &pps) if err == nil { t.Error("Expected missing name in StructScan to fail, but it did not.") } rows.Close() // now try various things with unsafe set. db = db.Unsafe() pps = []PersonPlus{} err = db.SelectContext(ctx, &pps, "SELECT * FROM person") if err != nil { t.Error(err) } // test Get pp = PersonPlus{} err = db.GetContext(ctx, &pp, "SELECT * FROM person LIMIT 1") if err != nil { t.Error(err) } // test naked StructScan pps = []PersonPlus{} rowsx, err := db.QueryxContext(ctx, "SELECT * FROM person LIMIT 1") if err != nil { t.Fatal(err) } rowsx.Next() err = StructScan(rowsx, &pps) if err != nil { t.Error(err) } rowsx.Close() // test Named stmt if !isUnsafe(db) { t.Error("Expected db to be unsafe, but it isn't") } nstmt, err := db.PrepareNamedContext(ctx, `SELECT * FROM person WHERE first_name != :name`) if err != nil { t.Fatal(err) } // its internal stmt should be marked unsafe if !nstmt.Stmt.unsafe { t.Error("expected NamedStmt to be unsafe but its underlying stmt did not inherit safety") } pps = []PersonPlus{} err = nstmt.SelectContext(ctx, &pps, map[string]interface{}{"name": "Jason"}) if err != nil { t.Fatal(err) } if len(pps) != 1 { t.Errorf("Expected 1 person back, got %d", len(pps)) } // test it with a safe db db.unsafe = false if isUnsafe(db) { t.Error("expected db to be safe but it isn't") } nstmt, err = db.PrepareNamedContext(ctx, `SELECT * FROM person WHERE first_name != :name`) if err != nil { t.Fatal(err) } // it should be safe if isUnsafe(nstmt) { t.Error("NamedStmt did not inherit safety") } nstmt.Unsafe() if !isUnsafe(nstmt) { t.Error("expected newly unsafed NamedStmt to be unsafe") } pps = []PersonPlus{} err = nstmt.SelectContext(ctx, &pps, map[string]interface{}{"name": "Jason"}) if err != nil { t.Fatal(err) } if len(pps) != 1 { t.Errorf("Expected 1 person back, got %d", len(pps)) } }) } func TestEmbeddedStructsContextContext(t *testing.T) { type Loop1 struct{ Person } type Loop2 struct{ Loop1 } type Loop3 struct{ Loop2 } RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) { loadDefaultFixtureContext(ctx, db, t) peopleAndPlaces := []PersonPlace{} err := db.SelectContext( ctx, &peopleAndPlaces, `SELECT person.*, place.* FROM person natural join place`) if err != nil { t.Fatal(err) } for _, pp := range peopleAndPlaces { if len(pp.Person.FirstName) == 0 { t.Errorf("Expected non zero lengthed first name.") } if len(pp.Place.Country) == 0 { t.Errorf("Expected non zero lengthed country.") } } // test embedded structs with StructScan rows, err := db.QueryxContext( ctx, `SELECT person.*, place.* FROM person natural join place`) if err != nil { t.Error(err) } perp := PersonPlace{} rows.Next() err = rows.StructScan(&perp) if err != nil { t.Error(err) } if len(perp.Person.FirstName) == 0 { t.Errorf("Expected non zero lengthed first name.") } if len(perp.Place.Country) == 0 { t.Errorf("Expected non zero lengthed country.") } rows.Close() // test the same for embedded pointer structs peopleAndPlacesPtrs := []PersonPlacePtr{} err = db.SelectContext( ctx, &peopleAndPlacesPtrs, `SELECT person.*, place.* FROM person natural join place`) if err != nil { t.Fatal(err) } for _, pp := range peopleAndPlacesPtrs { if len(pp.Person.FirstName) == 0 { t.Errorf("Expected non zero lengthed first name.") } if len(pp.Place.Country) == 0 { t.Errorf("Expected non zero lengthed country.") } } // test "deep nesting" l3s := []Loop3{} err = db.SelectContext(ctx, &l3s, `select * from person`) if err != nil { 
t.Fatal(err) } for _, l3 := range l3s { if len(l3.Loop2.Loop1.Person.FirstName) == 0 { t.Errorf("Expected non zero lengthed first name.") } } // test "embed conflicts" ec := []EmbedConflict{} err = db.SelectContext(ctx, &ec, `select * from person`) // I'm torn between erroring here or having some kind of working behavior // in order to allow for more flexibility in destination structs if err != nil { t.Errorf("Was not expecting an error on embed conflicts.") } }) } func TestJoinQueryContext(t *testing.T) { type Employee struct { Name string ID int64 // BossID is an id into the employee table BossID sql.NullInt64 `db:"boss_id"` } type Boss Employee RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) { loadDefaultFixtureContext(ctx, db, t) var employees []struct { Employee Boss `db:"boss"` } err := db.SelectContext(ctx, &employees, `SELECT employees.*, boss.id "boss.id", boss.name "boss.name" FROM employees JOIN employees AS boss ON employees.boss_id = boss.id`) if err != nil { t.Fatal(err) } for _, em := range employees { if len(em.Employee.Name) == 0 { t.Errorf("Expected non zero lengthed name.") } if em.Employee.BossID.Int64 != em.Boss.ID { t.Errorf("Expected boss ids to match") } } }) } func TestJoinQueryNamedPointerStructsContext(t *testing.T) { type Employee struct { Name string ID int64 // BossID is an id into the employee table BossID sql.NullInt64 `db:"boss_id"` } type Boss Employee RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) { loadDefaultFixtureContext(ctx, db, t) var employees []struct { Emp1 *Employee `db:"emp1"` Emp2 *Employee `db:"emp2"` *Boss `db:"boss"` } err := db.SelectContext(ctx, &employees, `SELECT emp.name "emp1.name", emp.id "emp1.id", emp.boss_id "emp1.boss_id", emp.name "emp2.name", emp.id "emp2.id", emp.boss_id "emp2.boss_id", boss.id "boss.id", boss.name "boss.name" FROM employees AS emp JOIN employees AS boss ON emp.boss_id = boss.id `) if err != nil { t.Fatal(err) } for _, em := range employees { if len(em.Emp1.Name) == 0 || len(em.Emp2.Name) == 0 { t.Errorf("Expected non zero lengthed name.") } if em.Emp1.BossID.Int64 != em.Boss.ID || em.Emp2.BossID.Int64 != em.Boss.ID { t.Errorf("Expected boss ids to match") } } }) } func TestSelectSliceMapTimeContext(t *testing.T) { RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) { loadDefaultFixtureContext(ctx, db, t) rows, err := db.QueryxContext(ctx, "SELECT * FROM person") if err != nil { t.Fatal(err) } for rows.Next() { _, err := rows.SliceScan() if err != nil { t.Error(err) } } rows, err = db.QueryxContext(ctx, "SELECT * FROM person") if err != nil { t.Fatal(err) } for rows.Next() { m := map[string]interface{}{} err := rows.MapScan(m) if err != nil { t.Error(err) } } }) } func TestNilReceiverContext(t *testing.T) { RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) { loadDefaultFixtureContext(ctx, db, t) var p *Person err := db.GetContext(ctx, p, "SELECT * FROM person LIMIT 1") if err == nil { t.Error("Expected error when getting into nil struct ptr.") } var pp *[]Person err = db.SelectContext(ctx, pp, "SELECT * FROM person") if err == nil { t.Error("Expected an error when selecting into nil slice ptr.") } }) } func TestNamedQueryContext(t *testing.T) { var schema = Schema{ create: ` CREATE TABLE place ( id integer PRIMARY KEY, name text NULL ); CREATE TABLE person ( first_name text 
NULL, last_name text NULL, email text NULL ); CREATE TABLE placeperson ( first_name text NULL, last_name text NULL, email text NULL, place_id integer NULL ); CREATE TABLE jsperson ( "FIRST" text NULL, last_name text NULL, "EMAIL" text NULL );`, drop: ` drop table person; drop table jsperson; drop table place; drop table placeperson; `, } RunWithSchemaContext(context.Background(), schema, t, func(ctx context.Context, db *DB, t *testing.T) { type Person struct { FirstName sql.NullString `db:"first_name"` LastName sql.NullString `db:"last_name"` Email sql.NullString } p := Person{ FirstName: sql.NullString{String: "ben", Valid: true}, LastName: sql.NullString{String: "doe", Valid: true}, Email: sql.NullString{String: "ben@doe.com", Valid: true}, } q1 := `INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)` _, err := db.NamedExecContext(ctx, q1, p) if err != nil { log.Fatal(err) } p2 := &Person{} rows, err := db.NamedQueryContext(ctx, "SELECT * FROM person WHERE first_name=:first_name", p) if err != nil { log.Fatal(err) } for rows.Next() { err = rows.StructScan(p2) if err != nil { t.Error(err) } if p2.FirstName.String != "ben" { t.Error("Expected first name of `ben`, got " + p2.FirstName.String) } if p2.LastName.String != "doe" { t.Error("Expected first name of `doe`, got " + p2.LastName.String) } } // these are tests for #73; they verify that named queries work if you've // changed the db mapper. This code checks both NamedQuery "ad-hoc" style // queries and NamedStmt queries, which use different code paths internally. old := (*db).Mapper type JSONPerson struct { FirstName sql.NullString `json:"FIRST"` LastName sql.NullString `json:"last_name"` Email sql.NullString } jp := JSONPerson{ FirstName: sql.NullString{String: "ben", Valid: true}, LastName: sql.NullString{String: "smith", Valid: true}, Email: sql.NullString{String: "ben@smith.com", Valid: true}, } db.Mapper = reflectx.NewMapperFunc("json", strings.ToUpper) // prepare queries for case sensitivity to test our ToUpper function. // postgres and sqlite accept "", but mysql uses ``; since Go's multi-line // strings are `` we use "" by default and swap out for MySQL pdb := func(s string, db *DB) string { if db.DriverName() == "mysql" { return strings.Replace(s, `"`, "`", -1) } return s } q1 = `INSERT INTO jsperson ("FIRST", last_name, "EMAIL") VALUES (:FIRST, :last_name, :EMAIL)` _, err = db.NamedExecContext(ctx, pdb(q1, db), jp) if err != nil { t.Fatal(err, db.DriverName()) } // Checks that a person pulled out of the db matches the one we put in check := func(t *testing.T, rows *Rows) { jp = JSONPerson{} for rows.Next() { err = rows.StructScan(&jp) if err != nil { t.Error(err) } if jp.FirstName.String != "ben" { t.Errorf("Expected first name of `ben`, got `%s` (%s) ", jp.FirstName.String, db.DriverName()) } if jp.LastName.String != "smith" { t.Errorf("Expected LastName of `smith`, got `%s` (%s)", jp.LastName.String, db.DriverName()) } if jp.Email.String != "ben@smith.com" { t.Errorf("Expected first name of `doe`, got `%s` (%s)", jp.Email.String, db.DriverName()) } } } ns, err := db.PrepareNamed(pdb(` SELECT * FROM jsperson WHERE "FIRST"=:FIRST AND last_name=:last_name AND "EMAIL"=:EMAIL `, db)) if err != nil { t.Fatal(err) } rows, err = ns.QueryxContext(ctx, jp) if err != nil { t.Fatal(err) } check(t, rows) // Check exactly the same thing, but with db.NamedQuery, which does not go // through the PrepareNamed/NamedStmt path. 
rows, err = db.NamedQueryContext(ctx, pdb(` SELECT * FROM jsperson WHERE "FIRST"=:FIRST AND last_name=:last_name AND "EMAIL"=:EMAIL `, db), jp) if err != nil { t.Fatal(err) } check(t, rows) db.Mapper = old // Test nested structs type Place struct { ID int `db:"id"` Name sql.NullString `db:"name"` } type PlacePerson struct { FirstName sql.NullString `db:"first_name"` LastName sql.NullString `db:"last_name"` Email sql.NullString Place Place `db:"place"` } pl := Place{ Name: sql.NullString{String: "myplace", Valid: true}, } pp := PlacePerson{ FirstName: sql.NullString{String: "ben", Valid: true}, LastName: sql.NullString{String: "doe", Valid: true}, Email: sql.NullString{String: "ben@doe.com", Valid: true}, } q2 := `INSERT INTO place (id, name) VALUES (1, :name)` _, err = db.NamedExecContext(ctx, q2, pl) if err != nil { log.Fatal(err) } id := 1 pp.Place.ID = id q3 := `INSERT INTO placeperson (first_name, last_name, email, place_id) VALUES (:first_name, :last_name, :email, :place.id)` _, err = db.NamedExecContext(ctx, q3, pp) if err != nil { log.Fatal(err) } pp2 := &PlacePerson{} rows, err = db.NamedQueryContext(ctx, ` SELECT first_name, last_name, email, place.id AS "place.id", place.name AS "place.name" FROM placeperson INNER JOIN place ON place.id = placeperson.place_id WHERE place.id=:place.id`, pp) if err != nil { log.Fatal(err) } for rows.Next() { err = rows.StructScan(pp2) if err != nil { t.Error(err) } if pp2.FirstName.String != "ben" { t.Error("Expected first name of `ben`, got " + pp2.FirstName.String) } if pp2.LastName.String != "doe" { t.Error("Expected first name of `doe`, got " + pp2.LastName.String) } if pp2.Place.Name.String != "myplace" { t.Error("Expected place name of `myplace`, got " + pp2.Place.Name.String) } if pp2.Place.ID != pp.Place.ID { t.Errorf("Expected place name of %v, got %v", pp.Place.ID, pp2.Place.ID) } } }) } func TestNilInsertsContext(t *testing.T) { var schema = Schema{ create: ` CREATE TABLE tt ( id integer, value text NULL DEFAULT NULL );`, drop: "drop table tt;", } RunWithSchemaContext(context.Background(), schema, t, func(ctx context.Context, db *DB, t *testing.T) { type TT struct { ID int Value *string } var v, v2 TT r := db.Rebind db.MustExecContext(ctx, r(`INSERT INTO tt (id) VALUES (1)`)) db.GetContext(ctx, &v, r(`SELECT * FROM tt`)) if v.ID != 1 { t.Errorf("Expecting id of 1, got %v", v.ID) } if v.Value != nil { t.Errorf("Expecting NULL to map to nil, got %s", *v.Value) } v.ID = 2 // NOTE: this incidentally uncovered a bug which was that named queries with // pointer destinations would not work if the passed value here was not addressable, // as reflectx.FieldByIndexes attempts to allocate nil pointer receivers for // writing. This was fixed by creating & using the reflectx.FieldByIndexesReadOnly // function. This next line is important as it provides the only coverage for this. 
db.NamedExecContext(ctx, `INSERT INTO tt (id, value) VALUES (:id, :value)`, v) db.GetContext(ctx, &v2, r(`SELECT * FROM tt WHERE id=2`)) if v.ID != v2.ID { t.Errorf("%v != %v", v.ID, v2.ID) } if v2.Value != nil { t.Errorf("Expecting NULL to map to nil, got %s", *v.Value) } }) } func TestScanErrorContext(t *testing.T) { var schema = Schema{ create: ` CREATE TABLE kv ( k text, v integer );`, drop: `drop table kv;`, } RunWithSchemaContext(context.Background(), schema, t, func(ctx context.Context, db *DB, t *testing.T) { type WrongTypes struct { K int V string } _, err := db.Exec(db.Rebind("INSERT INTO kv (k, v) VALUES (?, ?)"), "hi", 1) if err != nil { t.Error(err) } rows, err := db.QueryxContext(ctx, "SELECT * FROM kv") if err != nil { t.Error(err) } for rows.Next() { var wt WrongTypes err := rows.StructScan(&wt) if err == nil { t.Errorf("%s: Scanning wrong types into keys should have errored.", db.DriverName()) } } }) } // FIXME: this function is kinda big but it slows things down to be constantly // loading and reloading the schema.. func TestUsageContext(t *testing.T) { RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) { loadDefaultFixtureContext(ctx, db, t) slicemembers := []SliceMember{} err := db.SelectContext(ctx, &slicemembers, "SELECT * FROM place ORDER BY telcode ASC") if err != nil { t.Fatal(err) } people := []Person{} err = db.SelectContext(ctx, &people, "SELECT * FROM person ORDER BY first_name ASC") if err != nil { t.Fatal(err) } jason, john := people[0], people[1] if jason.FirstName != "Jason" { t.Errorf("Expecting FirstName of Jason, got %s", jason.FirstName) } if jason.LastName != "Moiron" { t.Errorf("Expecting LastName of Moiron, got %s", jason.LastName) } if jason.Email != "jmoiron@jmoiron.net" { t.Errorf("Expecting Email of jmoiron@jmoiron.net, got %s", jason.Email) } if john.FirstName != "John" || john.LastName != "Doe" || john.Email != "johndoeDNE@gmail.net" { t.Errorf("John Doe's person record not what expected: Got %v\n", john) } jason = Person{} err = db.GetContext(ctx, &jason, db.Rebind("SELECT * FROM person WHERE first_name=?"), "Jason") if err != nil { t.Fatal(err) } if jason.FirstName != "Jason" { t.Errorf("Expecting to get back Jason, but got %v\n", jason.FirstName) } err = db.GetContext(ctx, &jason, db.Rebind("SELECT * FROM person WHERE first_name=?"), "Foobar") if err == nil { t.Errorf("Expecting an error, got nil\n") } if err != sql.ErrNoRows { t.Errorf("Expected sql.ErrNoRows, got %v\n", err) } // The following tests check statement reuse, which was actually a problem // due to copying being done when creating Stmt's which was eventually removed stmt1, err := db.PreparexContext(ctx, db.Rebind("SELECT * FROM person WHERE first_name=?")) if err != nil { t.Fatal(err) } jason = Person{} row := stmt1.QueryRowx("DoesNotExist") row.Scan(&jason) row = stmt1.QueryRowx("DoesNotExist") row.Scan(&jason) err = stmt1.GetContext(ctx, &jason, "DoesNotExist User") if err == nil { t.Error("Expected an error") } err = stmt1.GetContext(ctx, &jason, "DoesNotExist User 2") if err == nil { t.Fatal(err) } stmt2, err := db.PreparexContext(ctx, db.Rebind("SELECT * FROM person WHERE first_name=?")) if err != nil { t.Fatal(err) } jason = Person{} tx, err := db.Beginx() if err != nil { t.Fatal(err) } tstmt2 := tx.Stmtx(stmt2) row2 := tstmt2.QueryRowx("Jason") err = row2.StructScan(&jason) if err != nil { t.Error(err) } tx.Commit() places := []*Place{} err = db.SelectContext(ctx, &places, "SELECT telcode FROM place ORDER BY 
telcode ASC") if err != nil { t.Fatal(err) } usa, singsing, honkers := places[0], places[1], places[2] if usa.TelCode != 1 || honkers.TelCode != 852 || singsing.TelCode != 65 { t.Errorf("Expected integer telcodes to work, got %#v", places) } placesptr := []PlacePtr{} err = db.SelectContext(ctx, &placesptr, "SELECT * FROM place ORDER BY telcode ASC") if err != nil { t.Error(err) } // fmt.Printf("%#v\n%#v\n%#v\n", placesptr[0], placesptr[1], placesptr[2]) // if you have null fields and use SELECT *, you must use sql.Null* in your struct // this test also verifies that you can use either a []Struct{} or a []*Struct{} places2 := []Place{} err = db.SelectContext(ctx, &places2, "SELECT * FROM place ORDER BY telcode ASC") if err != nil { t.Fatal(err) } usa, singsing, honkers = &places2[0], &places2[1], &places2[2] // this should return a type error that &p is not a pointer to a struct slice p := Place{} err = db.SelectContext(ctx, &p, "SELECT * FROM place ORDER BY telcode ASC") if err == nil { t.Errorf("Expected an error, argument to select should be a pointer to a struct slice") } // this should be an error pl := []Place{} err = db.SelectContext(ctx, pl, "SELECT * FROM place ORDER BY telcode ASC") if err == nil { t.Errorf("Expected an error, argument to select should be a pointer to a struct slice, not a slice.") } if usa.TelCode != 1 || honkers.TelCode != 852 || singsing.TelCode != 65 { t.Errorf("Expected integer telcodes to work, got %#v", places) } stmt, err := db.PreparexContext(ctx, db.Rebind("SELECT country, telcode FROM place WHERE telcode > ? ORDER BY telcode ASC")) if err != nil { t.Error(err) } places = []*Place{} err = stmt.SelectContext(ctx, &places, 10) if len(places) != 2 { t.Error("Expected 2 places, got 0.") } if err != nil { t.Fatal(err) } singsing, honkers = places[0], places[1] if singsing.TelCode != 65 || honkers.TelCode != 852 { t.Errorf("Expected the right telcodes, got %#v", places) } rows, err := db.QueryxContext(ctx, "SELECT * FROM place") if err != nil { t.Fatal(err) } place := Place{} for rows.Next() { err = rows.StructScan(&place) if err != nil { t.Fatal(err) } } rows, err = db.QueryxContext(ctx, "SELECT * FROM place") if err != nil { t.Fatal(err) } m := map[string]interface{}{} for rows.Next() { err = rows.MapScan(m) if err != nil { t.Fatal(err) } _, ok := m["country"] if !ok { t.Errorf("Expected key `country` in map but could not find it (%#v)\n", m) } } rows, err = db.QueryxContext(ctx, "SELECT * FROM place") if err != nil { t.Fatal(err) } for rows.Next() { s, err := rows.SliceScan() if err != nil { t.Error(err) } if len(s) != 3 { t.Errorf("Expected 3 columns in result, got %d\n", len(s)) } } // test advanced querying // test that NamedExec works with a map as well as a struct _, err = db.NamedExecContext(ctx, "INSERT INTO person (first_name, last_name, email) VALUES (:first, :last, :email)", map[string]interface{}{ "first": "Bin", "last": "Smuth", "email": "bensmith@allblacks.nz", }) if err != nil { t.Fatal(err) } // ensure that if the named param happens right at the end it still works // ensure that NamedQuery works with a map[string]interface{} rows, err = db.NamedQueryContext(ctx, "SELECT * FROM person WHERE first_name=:first", map[string]interface{}{"first": "Bin"}) if err != nil { t.Fatal(err) } ben := &Person{} for rows.Next() { err = rows.StructScan(ben) if err != nil { t.Fatal(err) } if ben.FirstName != "Bin" { t.Fatal("Expected first name of `Bin`, got " + ben.FirstName) } if ben.LastName != "Smuth" { t.Fatal("Expected first name of `Smuth`, got " + 
ben.LastName) } } ben.FirstName = "Ben" ben.LastName = "Smith" ben.Email = "binsmuth@allblacks.nz" // Insert via a named query using the struct _, err = db.NamedExecContext(ctx, "INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", ben) if err != nil { t.Fatal(err) } rows, err = db.NamedQueryContext(ctx, "SELECT * FROM person WHERE first_name=:first_name", ben) if err != nil { t.Fatal(err) } for rows.Next() { err = rows.StructScan(ben) if err != nil { t.Fatal(err) } if ben.FirstName != "Ben" { t.Fatal("Expected first name of `Ben`, got " + ben.FirstName) } if ben.LastName != "Smith" { t.Fatal("Expected first name of `Smith`, got " + ben.LastName) } } // ensure that Get does not panic on emppty result set person := &Person{} err = db.GetContext(ctx, person, "SELECT * FROM person WHERE first_name=$1", "does-not-exist") if err == nil { t.Fatal("Should have got an error for Get on non-existent row.") } // lets test prepared statements some more stmt, err = db.PreparexContext(ctx, db.Rebind("SELECT * FROM person WHERE first_name=?")) if err != nil { t.Fatal(err) } rows, err = stmt.QueryxContext(ctx, "Ben") if err != nil { t.Fatal(err) } for rows.Next() { err = rows.StructScan(ben) if err != nil { t.Fatal(err) } if ben.FirstName != "Ben" { t.Fatal("Expected first name of `Ben`, got " + ben.FirstName) } if ben.LastName != "Smith" { t.Fatal("Expected first name of `Smith`, got " + ben.LastName) } } john = Person{} stmt, err = db.PreparexContext(ctx, db.Rebind("SELECT * FROM person WHERE first_name=?")) if err != nil { t.Error(err) } err = stmt.GetContext(ctx, &john, "John") if err != nil { t.Error(err) } // test name mapping // THIS USED TO WORK BUT WILL NO LONGER WORK. db.MapperFunc(strings.ToUpper) rsa := CPlace{} err = db.GetContext(ctx, &rsa, "SELECT * FROM capplace;") if err != nil { t.Error(err, "in db:", db.DriverName()) } db.MapperFunc(strings.ToLower) // create a copy and change the mapper, then verify the copy behaves // differently from the original. dbCopy := NewDb(db.DB, db.DriverName()) dbCopy.MapperFunc(strings.ToUpper) err = dbCopy.GetContext(ctx, &rsa, "SELECT * FROM capplace;") if err != nil { fmt.Println(db.DriverName()) t.Error(err) } err = db.GetContext(ctx, &rsa, "SELECT * FROM cappplace;") if err == nil { t.Error("Expected no error, got ", err) } // test base type slices var sdest []string rows, err = db.QueryxContext(ctx, "SELECT email FROM person ORDER BY email ASC;") if err != nil { t.Error(err) } err = scanAll(rows, &sdest, false) if err != nil { t.Error(err) } // test Get with base types var count int err = db.GetContext(ctx, &count, "SELECT count(*) FROM person;") if err != nil { t.Error(err) } if count != len(sdest) { t.Errorf("Expected %d == %d (count(*) vs len(SELECT ..)", count, len(sdest)) } // test Get and Select with time.Time, #84 var addedAt time.Time err = db.GetContext(ctx, &addedAt, "SELECT added_at FROM person LIMIT 1;") if err != nil { t.Error(err) } var addedAts []time.Time err = db.SelectContext(ctx, &addedAts, "SELECT added_at FROM person;") if err != nil { t.Error(err) } // test it on a double pointer var pcount *int err = db.GetContext(ctx, &pcount, "SELECT count(*) FROM person;") if err != nil { t.Error(err) } if *pcount != count { t.Errorf("expected %d = %d", *pcount, count) } // test Select... 
sdest = []string{} err = db.SelectContext(ctx, &sdest, "SELECT first_name FROM person ORDER BY first_name ASC;") if err != nil { t.Error(err) } expected := []string{"Ben", "Bin", "Jason", "John"} for i, got := range sdest { if got != expected[i] { t.Errorf("Expected %d result to be %s, but got %s", i, expected[i], got) } } var nsdest []sql.NullString err = db.SelectContext(ctx, &nsdest, "SELECT city FROM place ORDER BY city ASC") if err != nil { t.Error(err) } for _, val := range nsdest { if val.Valid && val.String != "New York" { t.Errorf("expected single valid result to be `New York`, but got %s", val.String) } } }) } // tests that sqlx will not panic when the wrong driver is passed because // of an automatic nil dereference in sqlx.Open(), which was fixed. func TestDoNotPanicOnConnectContext(t *testing.T) { _, err := ConnectContext(context.Background(), "bogus", "hehe") if err == nil { t.Errorf("Should return error when using bogus driverName") } } func TestEmbeddedMapsContext(t *testing.T) { var schema = Schema{ create: ` CREATE TABLE message ( string text, properties text );`, drop: `drop table message;`, } RunWithSchemaContext(context.Background(), schema, t, func(ctx context.Context, db *DB, t *testing.T) { messages := []Message{ {"Hello, World", PropertyMap{"one": "1", "two": "2"}}, {"Thanks, Joy", PropertyMap{"pull": "request"}}, } q1 := `INSERT INTO message (string, properties) VALUES (:string, :properties);` for _, m := range messages { _, err := db.NamedExecContext(ctx, q1, m) if err != nil { t.Fatal(err) } } var count int err := db.GetContext(ctx, &count, "SELECT count(*) FROM message") if err != nil { t.Fatal(err) } if count != len(messages) { t.Fatalf("Expected %d messages in DB, found %d", len(messages), count) } var m Message err = db.GetContext(ctx, &m, "SELECT * FROM message LIMIT 1;") if err != nil { t.Fatal(err) } if m.Properties == nil { t.Fatal("Expected m.Properties to not be nil, but it was.") } }) } func TestIssue197Context(t *testing.T) { // this test actually tests for a bug in database/sql: // https://github.com/golang/go/issues/13905 // this potentially makes _any_ named type that is an alias for []byte // unsafe to use in a lot of different ways (basically, unsafe to hold // onto after loading from the database). t.Skip() type mybyte []byte type Var struct{ Raw json.RawMessage } type Var2 struct{ Raw []byte } type Var3 struct{ Raw mybyte } RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) { var err error var v, q Var if err = db.GetContext(ctx, &v, `SELECT '{"a": "b"}' AS raw`); err != nil { t.Fatal(err) } if err = db.GetContext(ctx, &q, `SELECT 'null' AS raw`); err != nil { t.Fatal(err) } var v2, q2 Var2 if err = db.GetContext(ctx, &v2, `SELECT '{"a": "b"}' AS raw`); err != nil { t.Fatal(err) } if err = db.GetContext(ctx, &q2, `SELECT 'null' AS raw`); err != nil { t.Fatal(err) } var v3, q3 Var3 if err = db.QueryRowContext(ctx, `SELECT '{"a": "b"}' AS raw`).Scan(&v3.Raw); err != nil { t.Fatal(err) } if err = db.QueryRowContext(ctx, `SELECT '{"c": "d"}' AS raw`).Scan(&q3.Raw); err != nil { t.Fatal(err) } t.Fail() }) } func TestInContext(t *testing.T) { // some quite normal situations type tr struct { q string args []interface{} c int } tests := []tr{ {"SELECT * FROM foo WHERE x = ? AND v in (?) 
AND y = ?", []interface{}{"foo", []int{0, 5, 7, 2, 9}, "bar"}, 7}, {"SELECT * FROM foo WHERE x in (?)", []interface{}{[]int{1, 2, 3, 4, 5, 6, 7, 8}}, 8}, } for _, test := range tests { q, a, err := In(test.q, test.args...) if err != nil { t.Error(err) } if len(a) != test.c { t.Errorf("Expected %d args, but got %d (%+v)", test.c, len(a), a) } if strings.Count(q, "?") != test.c { t.Errorf("Expected %d bindVars, got %d", test.c, strings.Count(q, "?")) } } // too many bindVars, but no slices, so short circuits parsing // i'm not sure if this is the right behavior; this query/arg combo // might not work, but we shouldn't parse if we don't need to { orig := "SELECT * FROM foo WHERE x = ? AND y = ?" q, a, err := In(orig, "foo", "bar", "baz") if err != nil { t.Error(err) } if len(a) != 3 { t.Errorf("Expected 3 args, but got %d (%+v)", len(a), a) } if q != orig { t.Error("Expected unchanged query.") } } tests = []tr{ // too many bindvars; slice present so should return error during parse {"SELECT * FROM foo WHERE x = ? and y = ?", []interface{}{"foo", []int{1, 2, 3}, "bar"}, 0}, // empty slice, should return error before parse {"SELECT * FROM foo WHERE x = ?", []interface{}{[]int{}}, 0}, // too *few* bindvars, should return an error {"SELECT * FROM foo WHERE x = ? AND y in (?)", []interface{}{[]int{1, 2, 3}}, 0}, } for _, test := range tests { _, _, err := In(test.q, test.args...) if err == nil { t.Error("Expected an error, but got nil.") } } RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) { loadDefaultFixtureContext(ctx, db, t) // tx.MustExecContext(ctx, tx.Rebind("INSERT INTO place (country, city, telcode) VALUES (?, ?, ?)"), "United States", "New York", "1") // tx.MustExecContext(ctx, tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Hong Kong", "852") // tx.MustExecContext(ctx, tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Singapore", "65") telcodes := []int{852, 65} q := "SELECT * FROM place WHERE telcode IN(?) ORDER BY telcode" query, args, err := In(q, telcodes) if err != nil { t.Error(err) } query = db.Rebind(query) places := []Place{} err = db.SelectContext(ctx, &places, query, args...) 
if err != nil { t.Error(err) } if len(places) != 2 { t.Fatalf("Expecting 2 results, got %d", len(places)) } if places[0].TelCode != 65 { t.Errorf("Expecting singapore first, but got %#v", places[0]) } if places[1].TelCode != 852 { t.Errorf("Expecting hong kong second, but got %#v", places[1]) } }) } func TestEmbeddedLiteralsContext(t *testing.T) { var schema = Schema{ create: ` CREATE TABLE x ( k text );`, drop: `drop table x;`, } RunWithSchemaContext(context.Background(), schema, t, func(ctx context.Context, db *DB, t *testing.T) { type t1 struct { K *string } type t2 struct { Inline struct { F string } K *string } db.MustExecContext(ctx, db.Rebind("INSERT INTO x (k) VALUES (?), (?), (?);"), "one", "two", "three") target := t1{} err := db.GetContext(ctx, &target, db.Rebind("SELECT * FROM x WHERE k=?"), "one") if err != nil { t.Error(err) } if *target.K != "one" { t.Error("Expected target.K to be `one`, got ", target.K) } target2 := t2{} err = db.GetContext(ctx, &target2, db.Rebind("SELECT * FROM x WHERE k=?"), "one") if err != nil { t.Error(err) } if *target2.K != "one" { t.Errorf("Expected target2.K to be `one`, got `%v`", target2.K) } }) } func TestConn(t *testing.T) { var schema = Schema{ create: ` CREATE TABLE tt_conn ( id integer, value text NULL DEFAULT NULL );`, drop: "drop table tt_conn;", } RunWithSchemaContext(context.Background(), schema, t, func(ctx context.Context, db *DB, t *testing.T) { conn, err := db.Connx(ctx) defer conn.Close() //lint:ignore SA5001 it's OK to ignore this here. if err != nil { t.Fatal(err) } _, err = conn.ExecContext(ctx, conn.Rebind(`INSERT INTO tt_conn (id, value) VALUES (?, ?), (?, ?)`), 1, "a", 2, "b") if err != nil { t.Fatal(err) } type s struct { ID int `db:"id"` Value string `db:"value"` } v := []s{} err = conn.SelectContext(ctx, &v, "SELECT * FROM tt_conn ORDER BY id ASC") if err != nil { t.Fatal(err) } if v[0].ID != 1 { t.Errorf("Expecting ID of 1, got %d", v[0].ID) } v1 := s{} err = conn.GetContext(ctx, &v1, conn.Rebind("SELECT * FROM tt_conn WHERE id=?"), 1) if err != nil { t.Fatal(err) } if v1.ID != 1 { t.Errorf("Expecting to get back 1, but got %v\n", v1.ID) } stmt, err := conn.PreparexContext(ctx, conn.Rebind("SELECT * FROM tt_conn WHERE id=?")) if err != nil { t.Fatal(err) } v1 = s{} tx, err := conn.BeginTxx(ctx, nil) if err != nil { t.Fatal(err) } tstmt := tx.Stmtx(stmt) row := tstmt.QueryRowx(1) err = row.StructScan(&v1) if err != nil { t.Error(err) } tx.Commit() if v1.ID != 1 { t.Errorf("Expecting to get back 1, but got %v\n", v1.ID) } rows, err := conn.QueryxContext(ctx, "SELECT * FROM tt_conn") if err != nil { t.Fatal(err) } for rows.Next() { err = rows.StructScan(&v1) if err != nil { t.Fatal(err) } } }) } dependencies/pkg/mod/github.com/jmoiron/sqlx@v1.4.0/bind.go0000644000000000000000000001410215024302467022360 0ustar rootrootpackage sqlx import ( "bytes" "database/sql/driver" "errors" "reflect" "strconv" "strings" "sync" "github.com/jmoiron/sqlx/reflectx" ) // Bindvar types supported by Rebind, BindMap and BindStruct. 
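// [Editor's addition — illustrative sketch, not part of the upstream sqlx source.]
// How the bindvar constants below interact with Rebind, starting from queries
// written with the default `?` placeholder:
//
//	q := Rebind(DOLLAR, "SELECT * FROM person WHERE first_name = ? AND last_name = ?")
//	// q == "SELECT * FROM person WHERE first_name = $1 AND last_name = $2"
//	q = Rebind(AT, "UPDATE person SET email = ? WHERE id = ?")
//	// q == "UPDATE person SET email = @p1 WHERE id = @p2"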
const ( UNKNOWN = iota QUESTION DOLLAR NAMED AT ) var defaultBinds = map[int][]string{ DOLLAR: []string{"postgres", "pgx", "pq-timeouts", "cloudsqlpostgres", "ql", "nrpostgres", "cockroach"}, QUESTION: []string{"mysql", "sqlite3", "nrmysql", "nrsqlite3"}, NAMED: []string{"oci8", "ora", "goracle", "godror"}, AT: []string{"sqlserver"}, } var binds sync.Map func init() { for bind, drivers := range defaultBinds { for _, driver := range drivers { BindDriver(driver, bind) } } } // BindType returns the bindtype for a given database given a drivername. func BindType(driverName string) int { itype, ok := binds.Load(driverName) if !ok { return UNKNOWN } return itype.(int) } // BindDriver sets the BindType for driverName to bindType. func BindDriver(driverName string, bindType int) { binds.Store(driverName, bindType) } // FIXME: this should be able to be tolerant of escaped ?'s in queries without // losing much speed, and should be to avoid confusion. // Rebind a query from the default bindtype (QUESTION) to the target bindtype. func Rebind(bindType int, query string) string { switch bindType { case QUESTION, UNKNOWN: return query } // Add space enough for 10 params before we have to allocate rqb := make([]byte, 0, len(query)+10) var i, j int for i = strings.Index(query, "?"); i != -1; i = strings.Index(query, "?") { rqb = append(rqb, query[:i]...) switch bindType { case DOLLAR: rqb = append(rqb, '$') case NAMED: rqb = append(rqb, ':', 'a', 'r', 'g') case AT: rqb = append(rqb, '@', 'p') } j++ rqb = strconv.AppendInt(rqb, int64(j), 10) query = query[i+1:] } return string(append(rqb, query...)) } // Experimental implementation of Rebind which uses a bytes.Buffer. The code is // much simpler and should be more resistant to odd unicode, but it is twice as // slow. Kept here for benchmarking purposes and to possibly replace Rebind if // problems arise with its somewhat naive handling of unicode. func rebindBuff(bindType int, query string) string { if bindType != DOLLAR { return query } b := make([]byte, 0, len(query)) rqb := bytes.NewBuffer(b) j := 1 for _, r := range query { if r == '?' { rqb.WriteRune('$') rqb.WriteString(strconv.Itoa(j)) j++ } else { rqb.WriteRune(r) } } return rqb.String() } func asSliceForIn(i interface{}) (v reflect.Value, ok bool) { if i == nil { return reflect.Value{}, false } v = reflect.ValueOf(i) t := reflectx.Deref(v.Type()) // Only expand slices if t.Kind() != reflect.Slice { return reflect.Value{}, false } // []byte is a driver.Value type so it should not be expanded if t == reflect.TypeOf([]byte{}) { return reflect.Value{}, false } return v, true } // In expands slice values in args, returning the modified query string // and a new arg list that can be executed by a database. The `query` should // use the `?` bindVar. The return value uses the `?` bindVar. 
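//
// A hedged usage sketch (query and values are illustrative only):
//
//	q, args, err := In("SELECT * FROM users WHERE level IN (?) AND name = ?", []int{4, 6, 7}, "foo")
//	// q    == "SELECT * FROM users WHERE level IN (?, ?, ?) AND name = ?"
//	// args == []interface{}{4, 6, 7, "foo"}
//
// The result still uses the `?` bindVar, so pass it through Rebind when the
// target driver expects $N, :argN or @pN placeholders.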
func In(query string, args ...interface{}) (string, []interface{}, error) { // argMeta stores reflect.Value and length for slices and // the value itself for non-slice arguments type argMeta struct { v reflect.Value i interface{} length int } var flatArgsCount int var anySlices bool var stackMeta [32]argMeta var meta []argMeta if len(args) <= len(stackMeta) { meta = stackMeta[:len(args)] } else { meta = make([]argMeta, len(args)) } for i, arg := range args { if a, ok := arg.(driver.Valuer); ok { var err error arg, err = a.Value() if err != nil { return "", nil, err } } if v, ok := asSliceForIn(arg); ok { meta[i].length = v.Len() meta[i].v = v anySlices = true flatArgsCount += meta[i].length if meta[i].length == 0 { return "", nil, errors.New("empty slice passed to 'in' query") } } else { meta[i].i = arg flatArgsCount++ } } // don't do any parsing if there aren't any slices; note that this means // some errors that we might have caught below will not be returned. if !anySlices { return query, args, nil } newArgs := make([]interface{}, 0, flatArgsCount) var buf strings.Builder buf.Grow(len(query) + len(", ?")*flatArgsCount) var arg, offset int for i := strings.IndexByte(query[offset:], '?'); i != -1; i = strings.IndexByte(query[offset:], '?') { if arg >= len(meta) { // if an argument wasn't passed, lets return an error; this is // not actually how database/sql Exec/Query works, but since we are // creating an argument list programmatically, we want to be able // to catch these programmer errors earlier. return "", nil, errors.New("number of bindVars exceeds arguments") } argMeta := meta[arg] arg++ // not a slice, continue. // our questionmark will either be written before the next expansion // of a slice or after the loop when writing the rest of the query if argMeta.length == 0 { offset = offset + i + 1 newArgs = append(newArgs, argMeta.i) continue } // write everything up to and including our ? character buf.WriteString(query[:offset+i+1]) for si := 1; si < argMeta.length; si++ { buf.WriteString(", ?") } newArgs = appendReflectSlice(newArgs, argMeta.v, argMeta.length) // slice the query and reset the offset. this avoids some bookkeeping for // the write after the loop query = query[offset+i+1:] offset = 0 } buf.WriteString(query) if arg < len(meta) { return "", nil, errors.New("number of bindVars less than number arguments") } return buf.String(), newArgs, nil } func appendReflectSlice(args []interface{}, v reflect.Value, vlen int) []interface{} { switch val := v.Interface().(type) { case []interface{}: args = append(args, val...) case []int: for i := range val { args = append(args, val[i]) } case []string: for i := range val { args = append(args, val[i]) } default: for si := 0; si < vlen; si++ { args = append(args, v.Index(si).Interface()) } } return args } dependencies/pkg/mod/github.com/jmoiron/sqlx@v1.4.0/named.go0000644000000000000000000003421515024302467022537 0ustar rootrootpackage sqlx // Named Query Support // // * BindMap - bind query bindvars to map/struct args // * NamedExec, NamedQuery - named query w/ struct or map // * NamedStmt - a pre-compiled named query which is a prepared statement // // Internal Interfaces: // // * compileNamedQuery - rebind a named query, returning a query and list of names // * bindArgs, bindMapArgs, bindAnyArgs - given a list of names, return an arglist // import ( "bytes" "database/sql" "errors" "fmt" "reflect" "regexp" "strconv" "unicode" "github.com/jmoiron/sqlx/reflectx" ) // NamedStmt is a prepared statement that executes named queries. 
Prepare it // how you would execute a NamedQuery, but pass in a struct or map when executing. type NamedStmt struct { Params []string QueryString string Stmt *Stmt } // Close closes the named statement. func (n *NamedStmt) Close() error { return n.Stmt.Close() } // Exec executes a named statement using the struct passed. // Any named placeholder parameters are replaced with fields from arg. func (n *NamedStmt) Exec(arg interface{}) (sql.Result, error) { args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) if err != nil { return *new(sql.Result), err } return n.Stmt.Exec(args...) } // Query executes a named statement using the struct argument, returning rows. // Any named placeholder parameters are replaced with fields from arg. func (n *NamedStmt) Query(arg interface{}) (*sql.Rows, error) { args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) if err != nil { return nil, err } return n.Stmt.Query(args...) } // QueryRow executes a named statement against the database. Because sqlx cannot // create a *sql.Row with an error condition pre-set for binding errors, sqlx // returns a *sqlx.Row instead. // Any named placeholder parameters are replaced with fields from arg. func (n *NamedStmt) QueryRow(arg interface{}) *Row { args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) if err != nil { return &Row{err: err} } return n.Stmt.QueryRowx(args...) } // MustExec execs a NamedStmt, panicing on error // Any named placeholder parameters are replaced with fields from arg. func (n *NamedStmt) MustExec(arg interface{}) sql.Result { res, err := n.Exec(arg) if err != nil { panic(err) } return res } // Queryx using this NamedStmt // Any named placeholder parameters are replaced with fields from arg. func (n *NamedStmt) Queryx(arg interface{}) (*Rows, error) { r, err := n.Query(arg) if err != nil { return nil, err } return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err } // QueryRowx this NamedStmt. Because of limitations with QueryRow, this is // an alias for QueryRow. // Any named placeholder parameters are replaced with fields from arg. func (n *NamedStmt) QueryRowx(arg interface{}) *Row { return n.QueryRow(arg) } // Select using this NamedStmt // Any named placeholder parameters are replaced with fields from arg. func (n *NamedStmt) Select(dest interface{}, arg interface{}) error { rows, err := n.Queryx(arg) if err != nil { return err } // if something happens here, we want to make sure the rows are Closed defer rows.Close() return scanAll(rows, dest, false) } // Get using this NamedStmt // Any named placeholder parameters are replaced with fields from arg. func (n *NamedStmt) Get(dest interface{}, arg interface{}) error { r := n.QueryRowx(arg) return r.scanAny(dest, false) } // Unsafe creates an unsafe version of the NamedStmt func (n *NamedStmt) Unsafe() *NamedStmt { r := &NamedStmt{Params: n.Params, Stmt: n.Stmt, QueryString: n.QueryString} r.Stmt.unsafe = true return r } // A union interface of preparer and binder, required to be able to prepare // named statements (as the bindtype must be determined). type namedPreparer interface { Preparer binder } func prepareNamed(p namedPreparer, query string) (*NamedStmt, error) { bindType := BindType(p.DriverName()) q, args, err := compileNamedQuery([]byte(query), bindType) if err != nil { return nil, err } stmt, err := Preparex(p, q) if err != nil { return nil, err } return &NamedStmt{ QueryString: q, Params: args, Stmt: stmt, }, nil } // convertMapStringInterface attempts to convert v to map[string]interface{}. 
// Unlike v.(map[string]interface{}), this function works on named types that // are convertible to map[string]interface{} as well. func convertMapStringInterface(v interface{}) (map[string]interface{}, bool) { var m map[string]interface{} mtype := reflect.TypeOf(m) t := reflect.TypeOf(v) if !t.ConvertibleTo(mtype) { return nil, false } return reflect.ValueOf(v).Convert(mtype).Interface().(map[string]interface{}), true } func bindAnyArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) { if maparg, ok := convertMapStringInterface(arg); ok { return bindMapArgs(names, maparg) } return bindArgs(names, arg, m) } // private interface to generate a list of interfaces from a given struct // type, given a list of names to pull out of the struct. Used by public // BindStruct interface. func bindArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) { arglist := make([]interface{}, 0, len(names)) // grab the indirected value of arg var v reflect.Value for v = reflect.ValueOf(arg); v.Kind() == reflect.Ptr; { v = v.Elem() } err := m.TraversalsByNameFunc(v.Type(), names, func(i int, t []int) error { if len(t) == 0 { return fmt.Errorf("could not find name %s in %#v", names[i], arg) } val := reflectx.FieldByIndexesReadOnly(v, t) arglist = append(arglist, val.Interface()) return nil }) return arglist, err } // like bindArgs, but for maps. func bindMapArgs(names []string, arg map[string]interface{}) ([]interface{}, error) { arglist := make([]interface{}, 0, len(names)) for _, name := range names { val, ok := arg[name] if !ok { return arglist, fmt.Errorf("could not find name %s in %#v", name, arg) } arglist = append(arglist, val) } return arglist, nil } // bindStruct binds a named parameter query with fields from a struct argument. // The rules for binding field names to parameter names follow the same // conventions as for StructScan, including obeying the `db` struct tags. func bindStruct(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) { bound, names, err := compileNamedQuery([]byte(query), bindType) if err != nil { return "", []interface{}{}, err } arglist, err := bindAnyArgs(names, arg, m) if err != nil { return "", []interface{}{}, err } return bound, arglist, nil } var valuesReg = regexp.MustCompile(`\)\s*(?i)VALUES\s*\(`) func findMatchingClosingBracketIndex(s string) int { count := 0 for i, ch := range s { if ch == '(' { count++ } if ch == ')' { count-- if count == 0 { return i } } } return 0 } func fixBound(bound string, loop int) string { loc := valuesReg.FindStringIndex(bound) // defensive guard when "VALUES (...)" not found if len(loc) < 2 { return bound } openingBracketIndex := loc[1] - 1 index := findMatchingClosingBracketIndex(bound[openingBracketIndex:]) // defensive guard. must have closing bracket if index == 0 { return bound } closingBracketIndex := openingBracketIndex + index + 1 var buffer bytes.Buffer buffer.WriteString(bound[0:closingBracketIndex]) for i := 0; i < loop-1; i++ { buffer.WriteString(",") buffer.WriteString(bound[openingBracketIndex:closingBracketIndex]) } buffer.WriteString(bound[closingBracketIndex:]) return buffer.String() } // bindArray binds a named parameter query with fields from an array or slice of // structs argument. func bindArray(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) { // do the initial binding with QUESTION; if bindType is not question, // we can rebind it at the end. 
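//
// A hedged illustration (not from the original source): binding
//
//	INSERT INTO foo (a, b) VALUES (:a, :b)
//
// against a slice of three structs compiles to `VALUES (?, ?)`, which fixBound
// then replicates into `VALUES (?, ?),(?, ?),(?, ?)`; the argument list is
// flattened to six values before the final Rebind, if any.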
bound, names, err := compileNamedQuery([]byte(query), QUESTION) if err != nil { return "", []interface{}{}, err } arrayValue := reflect.ValueOf(arg) arrayLen := arrayValue.Len() if arrayLen == 0 { return "", []interface{}{}, fmt.Errorf("length of array is 0: %#v", arg) } var arglist = make([]interface{}, 0, len(names)*arrayLen) for i := 0; i < arrayLen; i++ { elemArglist, err := bindAnyArgs(names, arrayValue.Index(i).Interface(), m) if err != nil { return "", []interface{}{}, err } arglist = append(arglist, elemArglist...) } if arrayLen > 1 { bound = fixBound(bound, arrayLen) } // adjust binding type if we weren't on question if bindType != QUESTION { bound = Rebind(bindType, bound) } return bound, arglist, nil } // bindMap binds a named parameter query with a map of arguments. func bindMap(bindType int, query string, args map[string]interface{}) (string, []interface{}, error) { bound, names, err := compileNamedQuery([]byte(query), bindType) if err != nil { return "", []interface{}{}, err } arglist, err := bindMapArgs(names, args) return bound, arglist, err } // -- Compilation of Named Queries // Allow digits and letters in bind params; additionally runes are // checked against underscores, meaning that bind params can have be // alphanumeric with underscores. Mind the difference between unicode // digits and numbers, where '5' is a digit but '五' is not. var allowedBindRunes = []*unicode.RangeTable{unicode.Letter, unicode.Digit} // FIXME: this function isn't safe for unicode named params, as a failing test // can testify. This is not a regression but a failure of the original code // as well. It should be modified to range over runes in a string rather than // bytes, even though this is less convenient and slower. Hopefully the // addition of the prepared NamedStmt (which will only do this once) will make // up for the slightly slower ad-hoc NamedExec/NamedQuery. // compile a NamedQuery into an unbound query (using the '?' bindvar) and // a list of names. 
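//
// For example (a hedged sketch mirroring the cases in named_test.go):
//
//	q, names, err := compileNamedQuery([]byte(`SELECT * FROM a WHERE first=:first AND last=:last`), DOLLAR)
//	// q     == `SELECT * FROM a WHERE first=$1 AND last=$2`
//	// names == []string{"first", "last"}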
func compileNamedQuery(qs []byte, bindType int) (query string, names []string, err error) { names = make([]string, 0, 10) rebound := make([]byte, 0, len(qs)) inName := false last := len(qs) - 1 currentVar := 1 name := make([]byte, 0, 10) for i, b := range qs { // a ':' while we're in a name is an error if b == ':' { // if this is the second ':' in a '::' escape sequence, append a ':' if inName && i > 0 && qs[i-1] == ':' { rebound = append(rebound, ':') inName = false continue } else if inName { err = errors.New("unexpected `:` while reading named param at " + strconv.Itoa(i)) return query, names, err } inName = true name = []byte{} } else if inName && i > 0 && b == '=' && len(name) == 0 { rebound = append(rebound, ':', '=') inName = false continue // if we're in a name, and this is an allowed character, continue } else if inName && (unicode.IsOneOf(allowedBindRunes, rune(b)) || b == '_' || b == '.') && i != last { // append the byte to the name if we are in a name and not on the last byte name = append(name, b) // if we're in a name and it's not an allowed character, the name is done } else if inName { inName = false // if this is the final byte of the string and it is part of the name, then // make sure to add it to the name if i == last && unicode.IsOneOf(allowedBindRunes, rune(b)) { name = append(name, b) } // add the string representation to the names list names = append(names, string(name)) // add a proper bindvar for the bindType switch bindType { // oracle only supports named type bind vars even for positional case NAMED: rebound = append(rebound, ':') rebound = append(rebound, name...) case QUESTION, UNKNOWN: rebound = append(rebound, '?') case DOLLAR: rebound = append(rebound, '$') for _, b := range strconv.Itoa(currentVar) { rebound = append(rebound, byte(b)) } currentVar++ case AT: rebound = append(rebound, '@', 'p') for _, b := range strconv.Itoa(currentVar) { rebound = append(rebound, byte(b)) } currentVar++ } // add this byte to string unless it was not part of the name if i != last { rebound = append(rebound, b) } else if !unicode.IsOneOf(allowedBindRunes, rune(b)) { rebound = append(rebound, b) } } else { // this is a normal byte and should just go onto the rebound query rebound = append(rebound, b) } } return string(rebound), names, err } // BindNamed binds a struct or a map to a query with named parameters. // DEPRECATED: use sqlx.Named` instead of this, it may be removed in future. func BindNamed(bindType int, query string, arg interface{}) (string, []interface{}, error) { return bindNamedMapper(bindType, query, arg, mapper()) } // Named takes a query using named parameters and an argument and // returns a new query with a list of args that can be executed by // a database. The return value uses the `?` bindvar. 
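//
// A hedged usage sketch (struct and tag are illustrative only):
//
//	arg := struct {
//		Name string `db:"name"`
//	}{Name: "Jane"}
//	q, args, err := Named("SELECT * FROM person WHERE name = :name", arg)
//	// q    == "SELECT * FROM person WHERE name = ?"
//	// args == []interface{}{"Jane"}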
func Named(query string, arg interface{}) (string, []interface{}, error) { return bindNamedMapper(QUESTION, query, arg, mapper()) } func bindNamedMapper(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) { t := reflect.TypeOf(arg) k := t.Kind() switch { case k == reflect.Map && t.Key().Kind() == reflect.String: m, ok := convertMapStringInterface(arg) if !ok { return "", nil, fmt.Errorf("sqlx.bindNamedMapper: unsupported map type: %T", arg) } return bindMap(bindType, query, m) case k == reflect.Array || k == reflect.Slice: return bindArray(bindType, query, arg, m) default: return bindStruct(bindType, query, arg, m) } } // NamedQuery binds a named query and then runs Query on the result using the // provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with // map[string]interface{} types. func NamedQuery(e Ext, query string, arg interface{}) (*Rows, error) { q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e)) if err != nil { return nil, err } return e.Queryx(q, args...) } // NamedExec uses BindStruct to get a query executable by the driver and // then runs Exec on the result. Returns an error from the binding // or the query execution itself. func NamedExec(e Ext, query string, arg interface{}) (sql.Result, error) { q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e)) if err != nil { return nil, err } return e.Exec(q, args...) } dependencies/pkg/mod/github.com/jmoiron/sqlx@v1.4.0/Makefile0000644000000000000000000000110715024302467022556 0ustar rootroot.ONESHELL: SHELL = /bin/sh .SHELLFLAGS = -ec BASE_PACKAGE := github.com/jmoiron/sqlx tooling: go install honnef.co/go/tools/cmd/staticcheck@v0.4.7 go install golang.org/x/vuln/cmd/govulncheck@v1.0.4 go install golang.org/x/tools/cmd/goimports@v0.20.0 has-changes: git diff --exit-code --quiet HEAD -- lint: go vet ./... staticcheck -checks=all ./... fmt: go list -f '{{.Dir}}' ./... | xargs -I {} goimports -local $(BASE_PACKAGE) -w {} vuln-check: govulncheck ./... test-race: go test -v -race -count=1 ./... update-dependencies: go get -u -t -v ./... 
go mod tidy dependencies/pkg/mod/github.com/jmoiron/sqlx@v1.4.0/named_context_test.go0000644000000000000000000000711215024302467025336 0ustar rootroot//go:build go1.8 // +build go1.8 package sqlx import ( "context" "database/sql" "testing" ) func TestNamedContextQueries(t *testing.T) { RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T, now string) { loadDefaultFixture(db, t) test := Test{t} var ns *NamedStmt var err error ctx := context.Background() // Check that invalid preparations fail _, err = db.PrepareNamedContext(ctx, "SELECT * FROM person WHERE first_name=:first:name") if err == nil { t.Error("Expected an error with invalid prepared statement.") } _, err = db.PrepareNamedContext(ctx, "invalid sql") if err == nil { t.Error("Expected an error with invalid prepared statement.") } // Check closing works as anticipated ns, err = db.PrepareNamedContext(ctx, "SELECT * FROM person WHERE first_name=:first_name") test.Error(err) err = ns.Close() test.Error(err) ns, err = db.PrepareNamedContext(ctx, ` SELECT first_name, last_name, email FROM person WHERE first_name=:first_name AND email=:email`) test.Error(err) // test Queryx w/ uses Query p := Person{FirstName: "Jason", LastName: "Moiron", Email: "jmoiron@jmoiron.net"} rows, err := ns.QueryxContext(ctx, p) test.Error(err) for rows.Next() { var p2 Person rows.StructScan(&p2) if p.FirstName != p2.FirstName { t.Errorf("got %s, expected %s", p.FirstName, p2.FirstName) } if p.LastName != p2.LastName { t.Errorf("got %s, expected %s", p.LastName, p2.LastName) } if p.Email != p2.Email { t.Errorf("got %s, expected %s", p.Email, p2.Email) } } // test Select people := make([]Person, 0, 5) err = ns.SelectContext(ctx, &people, p) test.Error(err) if len(people) != 1 { t.Errorf("got %d results, expected %d", len(people), 1) } if p.FirstName != people[0].FirstName { t.Errorf("got %s, expected %s", p.FirstName, people[0].FirstName) } if p.LastName != people[0].LastName { t.Errorf("got %s, expected %s", p.LastName, people[0].LastName) } if p.Email != people[0].Email { t.Errorf("got %s, expected %s", p.Email, people[0].Email) } // test Exec ns, err = db.PrepareNamedContext(ctx, ` INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)`) test.Error(err) js := Person{ FirstName: "Julien", LastName: "Savea", Email: "jsavea@ab.co.nz", } _, err = ns.ExecContext(ctx, js) test.Error(err) // Make sure we can pull him out again p2 := Person{} db.GetContext(ctx, &p2, db.Rebind("SELECT * FROM person WHERE email=?"), js.Email) if p2.Email != js.Email { t.Errorf("expected %s, got %s", js.Email, p2.Email) } // test Txn NamedStmts tx := db.MustBeginTx(ctx, nil) txns := tx.NamedStmtContext(ctx, ns) // We're going to add Steven in this txn sl := Person{ FirstName: "Steven", LastName: "Luatua", Email: "sluatua@ab.co.nz", } _, err = txns.ExecContext(ctx, sl) test.Error(err) // then rollback... 
tx.Rollback() // looking for Steven after a rollback should fail err = db.GetContext(ctx, &p2, db.Rebind("SELECT * FROM person WHERE email=?"), sl.Email) if err != sql.ErrNoRows { t.Errorf("expected no rows error, got %v", err) } // now do the same, but commit tx = db.MustBeginTx(ctx, nil) txns = tx.NamedStmtContext(ctx, ns) _, err = txns.ExecContext(ctx, sl) test.Error(err) tx.Commit() // looking for Steven after a Commit should succeed err = db.GetContext(ctx, &p2, db.Rebind("SELECT * FROM person WHERE email=?"), sl.Email) test.Error(err) if p2.Email != sl.Email { t.Errorf("expected %s, got %s", sl.Email, p2.Email) } }) } dependencies/pkg/mod/github.com/jmoiron/sqlx@v1.4.0/go.sum0000644000000000000000000000124415024302467022253 0ustar rootrootfilippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= dependencies/pkg/mod/github.com/jmoiron/sqlx@v1.4.0/.gitignore0000644000000000000000000000041715024302467023111 0ustar rootroot# Compiled Object files, Static and Dynamic libs (Shared Objects) *.o *.a *.so # Folders _obj _test .idea # Architecture specific extensions/prefixes *.[568vq] [568vq].out *.cgo1.go *.cgo2.c _cgo_defun.c _cgo_gotypes.go _cgo_export.* _testmain.go *.exe tags environ dependencies/pkg/mod/github.com/jmoiron/sqlx@v1.4.0/named_test.go0000644000000000000000000003113015024302467023567 0ustar rootrootpackage sqlx import ( "database/sql" "fmt" "testing" ) func TestCompileQuery(t *testing.T) { table := []struct { Q, R, D, T, N string V []string }{ // basic test for named parameters, invalid char ',' terminating { Q: `INSERT INTO foo (a,b,c,d) VALUES (:name, :age, :first, :last)`, R: `INSERT INTO foo (a,b,c,d) VALUES (?, ?, ?, ?)`, D: `INSERT INTO foo (a,b,c,d) VALUES ($1, $2, $3, $4)`, T: `INSERT INTO foo (a,b,c,d) VALUES (@p1, @p2, @p3, @p4)`, N: `INSERT INTO foo (a,b,c,d) VALUES (:name, :age, :first, :last)`, V: []string{"name", "age", "first", "last"}, }, // This query tests a named parameter ending the string as well as numbers { Q: `SELECT * FROM a WHERE first_name=:name1 AND last_name=:name2`, R: `SELECT * FROM a WHERE first_name=? AND last_name=?`, D: `SELECT * FROM a WHERE first_name=$1 AND last_name=$2`, T: `SELECT * FROM a WHERE first_name=@p1 AND last_name=@p2`, N: `SELECT * FROM a WHERE first_name=:name1 AND last_name=:name2`, V: []string{"name1", "name2"}, }, { Q: `SELECT "::foo" FROM a WHERE first_name=:name1 AND last_name=:name2`, R: `SELECT ":foo" FROM a WHERE first_name=? AND last_name=?`, D: `SELECT ":foo" FROM a WHERE first_name=$1 AND last_name=$2`, T: `SELECT ":foo" FROM a WHERE first_name=@p1 AND last_name=@p2`, N: `SELECT ":foo" FROM a WHERE first_name=:name1 AND last_name=:name2`, V: []string{"name1", "name2"}, }, { Q: `SELECT 'a::b::c' || first_name, '::::ABC::_::' FROM person WHERE first_name=:first_name AND last_name=:last_name`, R: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=? 
AND last_name=?`, D: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=$1 AND last_name=$2`, T: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=@p1 AND last_name=@p2`, N: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=:first_name AND last_name=:last_name`, V: []string{"first_name", "last_name"}, }, { Q: `SELECT @name := "name", :age, :first, :last`, R: `SELECT @name := "name", ?, ?, ?`, D: `SELECT @name := "name", $1, $2, $3`, N: `SELECT @name := "name", :age, :first, :last`, T: `SELECT @name := "name", @p1, @p2, @p3`, V: []string{"age", "first", "last"}, }, /* This unicode awareness test sadly fails, because of our byte-wise worldview. * We could certainly iterate by Rune instead, though it's a great deal slower, * it's probably the RightWay(tm) { Q: `INSERT INTO foo (a,b,c,d) VALUES (:あ, :b, :キコ, :名前)`, R: `INSERT INTO foo (a,b,c,d) VALUES (?, ?, ?, ?)`, D: `INSERT INTO foo (a,b,c,d) VALUES ($1, $2, $3, $4)`, N: []string{"name", "age", "first", "last"}, }, */ } for _, test := range table { qr, names, err := compileNamedQuery([]byte(test.Q), QUESTION) if err != nil { t.Error(err) } if qr != test.R { t.Errorf("expected %s, got %s", test.R, qr) } if len(names) != len(test.V) { t.Errorf("expected %#v, got %#v", test.V, names) } else { for i, name := range names { if name != test.V[i] { t.Errorf("expected %dth name to be %s, got %s", i+1, test.V[i], name) } } } qd, _, _ := compileNamedQuery([]byte(test.Q), DOLLAR) if qd != test.D { t.Errorf("\nexpected: `%s`\ngot: `%s`", test.D, qd) } qt, _, _ := compileNamedQuery([]byte(test.Q), AT) if qt != test.T { t.Errorf("\nexpected: `%s`\ngot: `%s`", test.T, qt) } qq, _, _ := compileNamedQuery([]byte(test.Q), NAMED) if qq != test.N { t.Errorf("\nexpected: `%s`\ngot: `%s`\n(len: %d vs %d)", test.N, qq, len(test.N), len(qq)) } } } type Test struct { t *testing.T } func (t Test) Error(err error, msg ...interface{}) { t.t.Helper() if err != nil { if len(msg) == 0 { t.t.Error(err) } else { t.t.Error(msg...) } } } func (t Test) Errorf(err error, format string, args ...interface{}) { t.t.Helper() if err != nil { t.t.Errorf(format, args...) 
} } func TestEscapedColons(t *testing.T) { t.Skip("not sure it is possible to support this in general case without an SQL parser") var qs = `SELECT * FROM testtable WHERE timeposted BETWEEN (now() AT TIME ZONE 'utc') AND (now() AT TIME ZONE 'utc') - interval '01:30:00') AND name = '\'this is a test\'' and id = :id` _, _, err := compileNamedQuery([]byte(qs), DOLLAR) if err != nil { t.Error("Didn't handle colons correctly when inside a string") } } func TestNamedQueries(t *testing.T) { RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T, now string) { loadDefaultFixture(db, t) test := Test{t} var ns *NamedStmt var err error // Check that invalid preparations fail _, err = db.PrepareNamed("SELECT * FROM person WHERE first_name=:first:name") if err == nil { t.Error("Expected an error with invalid prepared statement.") } _, err = db.PrepareNamed("invalid sql") if err == nil { t.Error("Expected an error with invalid prepared statement.") } // Check closing works as anticipated ns, err = db.PrepareNamed("SELECT * FROM person WHERE first_name=:first_name") test.Error(err) err = ns.Close() test.Error(err) ns, err = db.PrepareNamed(` SELECT first_name, last_name, email FROM person WHERE first_name=:first_name AND email=:email`) test.Error(err) // test Queryx w/ uses Query p := Person{FirstName: "Jason", LastName: "Moiron", Email: "jmoiron@jmoiron.net"} rows, err := ns.Queryx(p) test.Error(err) for rows.Next() { var p2 Person rows.StructScan(&p2) if p.FirstName != p2.FirstName { t.Errorf("got %s, expected %s", p.FirstName, p2.FirstName) } if p.LastName != p2.LastName { t.Errorf("got %s, expected %s", p.LastName, p2.LastName) } if p.Email != p2.Email { t.Errorf("got %s, expected %s", p.Email, p2.Email) } } // test Select people := make([]Person, 0, 5) err = ns.Select(&people, p) test.Error(err) if len(people) != 1 { t.Errorf("got %d results, expected %d", len(people), 1) } if p.FirstName != people[0].FirstName { t.Errorf("got %s, expected %s", p.FirstName, people[0].FirstName) } if p.LastName != people[0].LastName { t.Errorf("got %s, expected %s", p.LastName, people[0].LastName) } if p.Email != people[0].Email { t.Errorf("got %s, expected %s", p.Email, people[0].Email) } // test struct batch inserts sls := []Person{ {FirstName: "Ardie", LastName: "Savea", Email: "asavea@ab.co.nz"}, {FirstName: "Sonny Bill", LastName: "Williams", Email: "sbw@ab.co.nz"}, {FirstName: "Ngani", LastName: "Laumape", Email: "nlaumape@ab.co.nz"}, } insert := fmt.Sprintf( "INSERT INTO person (first_name, last_name, email, added_at) VALUES (:first_name, :last_name, :email, %v)\n", now, ) _, err = db.NamedExec(insert, sls) test.Error(err) // test map batch inserts slsMap := []map[string]interface{}{ {"first_name": "Ardie", "last_name": "Savea", "email": "asavea@ab.co.nz"}, {"first_name": "Sonny Bill", "last_name": "Williams", "email": "sbw@ab.co.nz"}, {"first_name": "Ngani", "last_name": "Laumape", "email": "nlaumape@ab.co.nz"}, } _, err = db.NamedExec(`INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email) ;--`, slsMap) test.Error(err) type A map[string]interface{} typedMap := []A{ {"first_name": "Ardie", "last_name": "Savea", "email": "asavea@ab.co.nz"}, {"first_name": "Sonny Bill", "last_name": "Williams", "email": "sbw@ab.co.nz"}, {"first_name": "Ngani", "last_name": "Laumape", "email": "nlaumape@ab.co.nz"}, } _, err = db.NamedExec(`INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email) ;--`, typedMap) test.Error(err) for _, p := range sls { dest 
:= Person{} err = db.Get(&dest, db.Rebind("SELECT * FROM person WHERE email=?"), p.Email) test.Error(err) if dest.Email != p.Email { t.Errorf("expected %s, got %s", p.Email, dest.Email) } } // test Exec ns, err = db.PrepareNamed(` INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)`) test.Error(err) js := Person{ FirstName: "Julien", LastName: "Savea", Email: "jsavea@ab.co.nz", } _, err = ns.Exec(js) test.Error(err) // Make sure we can pull him out again p2 := Person{} db.Get(&p2, db.Rebind("SELECT * FROM person WHERE email=?"), js.Email) if p2.Email != js.Email { t.Errorf("expected %s, got %s", js.Email, p2.Email) } // test Txn NamedStmts tx := db.MustBegin() txns := tx.NamedStmt(ns) // We're going to add Steven in this txn sl := Person{ FirstName: "Steven", LastName: "Luatua", Email: "sluatua@ab.co.nz", } _, err = txns.Exec(sl) test.Error(err) // then rollback... tx.Rollback() // looking for Steven after a rollback should fail err = db.Get(&p2, db.Rebind("SELECT * FROM person WHERE email=?"), sl.Email) if err != sql.ErrNoRows { t.Errorf("expected no rows error, got %v", err) } // now do the same, but commit tx = db.MustBegin() txns = tx.NamedStmt(ns) _, err = txns.Exec(sl) test.Error(err) tx.Commit() // looking for Steven after a Commit should succeed err = db.Get(&p2, db.Rebind("SELECT * FROM person WHERE email=?"), sl.Email) test.Error(err) if p2.Email != sl.Email { t.Errorf("expected %s, got %s", sl.Email, p2.Email) } }) } func TestFixBounds(t *testing.T) { table := []struct { name, query, expect string loop int }{ { name: `named syntax`, query: `INSERT INTO foo (a,b,c,d) VALUES (:name, :age, :first, :last)`, expect: `INSERT INTO foo (a,b,c,d) VALUES (:name, :age, :first, :last),(:name, :age, :first, :last)`, loop: 2, }, { name: `mysql syntax`, query: `INSERT INTO foo (a,b,c,d) VALUES (?, ?, ?, ?)`, expect: `INSERT INTO foo (a,b,c,d) VALUES (?, ?, ?, ?),(?, ?, ?, ?)`, loop: 2, }, { name: `named syntax w/ trailer`, query: `INSERT INTO foo (a,b,c,d) VALUES (:name, :age, :first, :last) ;--`, expect: `INSERT INTO foo (a,b,c,d) VALUES (:name, :age, :first, :last),(:name, :age, :first, :last) ;--`, loop: 2, }, { name: `mysql syntax w/ trailer`, query: `INSERT INTO foo (a,b,c,d) VALUES (?, ?, ?, ?) ;--`, expect: `INSERT INTO foo (a,b,c,d) VALUES (?, ?, ?, ?),(?, ?, ?, ?) 
;--`, loop: 2, }, { name: `not found test`, query: `INSERT INTO foo (a,b,c,d) (:name, :age, :first, :last)`, expect: `INSERT INTO foo (a,b,c,d) (:name, :age, :first, :last)`, loop: 2, }, { name: `found twice test`, query: `INSERT INTO foo (a,b,c,d) VALUES (:name, :age, :first, :last) VALUES (:name, :age, :first, :last)`, expect: `INSERT INTO foo (a,b,c,d) VALUES (:name, :age, :first, :last),(:name, :age, :first, :last) VALUES (:name, :age, :first, :last)`, loop: 2, }, { name: `nospace`, query: `INSERT INTO foo (a,b) VALUES(:a, :b)`, expect: `INSERT INTO foo (a,b) VALUES(:a, :b),(:a, :b)`, loop: 2, }, { name: `lowercase`, query: `INSERT INTO foo (a,b) values(:a, :b)`, expect: `INSERT INTO foo (a,b) values(:a, :b),(:a, :b)`, loop: 2, }, { name: `on duplicate key using VALUES`, query: `INSERT INTO foo (a,b) VALUES (:a, :b) ON DUPLICATE KEY UPDATE a=VALUES(a)`, expect: `INSERT INTO foo (a,b) VALUES (:a, :b),(:a, :b) ON DUPLICATE KEY UPDATE a=VALUES(a)`, loop: 2, }, { name: `single column`, query: `INSERT INTO foo (a) VALUES (:a)`, expect: `INSERT INTO foo (a) VALUES (:a),(:a)`, loop: 2, }, { name: `call now`, query: `INSERT INTO foo (a, b) VALUES (:a, NOW())`, expect: `INSERT INTO foo (a, b) VALUES (:a, NOW()),(:a, NOW())`, loop: 2, }, { name: `two level depth function call`, query: `INSERT INTO foo (a, b) VALUES (:a, YEAR(NOW()))`, expect: `INSERT INTO foo (a, b) VALUES (:a, YEAR(NOW())),(:a, YEAR(NOW()))`, loop: 2, }, { name: `missing closing bracket`, query: `INSERT INTO foo (a, b) VALUES (:a, YEAR(NOW())`, expect: `INSERT INTO foo (a, b) VALUES (:a, YEAR(NOW())`, loop: 2, }, { name: `table with "values" at the end`, query: `INSERT INTO table_values (a, b) VALUES (:a, :b)`, expect: `INSERT INTO table_values (a, b) VALUES (:a, :b),(:a, :b)`, loop: 2, }, { name: `multiline indented query`, query: `INSERT INTO foo ( a, b, c, d ) VALUES ( :name, :age, :first, :last )`, expect: `INSERT INTO foo ( a, b, c, d ) VALUES ( :name, :age, :first, :last ),( :name, :age, :first, :last )`, loop: 2, }, } for _, tc := range table { t.Run(tc.name, func(t *testing.T) { res := fixBound(tc.query, tc.loop) if res != tc.expect { t.Errorf("mismatched results") } }) } } dependencies/pkg/mod/github.com/jmoiron/sqlx@v1.4.0/sqlx_context.go0000644000000000000000000003737715024302467024222 0ustar rootroot//go:build go1.8 // +build go1.8 package sqlx import ( "context" "database/sql" "fmt" "io/ioutil" "path/filepath" "reflect" ) // ConnectContext to a database and verify with a ping. func ConnectContext(ctx context.Context, driverName, dataSourceName string) (*DB, error) { db, err := Open(driverName, dataSourceName) if err != nil { return db, err } err = db.PingContext(ctx) return db, err } // QueryerContext is an interface used by GetContext and SelectContext type QueryerContext interface { QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row } // PreparerContext is an interface used by PreparexContext. 
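// It is satisfied by *sql.DB, *sql.Conn and *sql.Tx from database/sql.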
type PreparerContext interface { PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) } // ExecerContext is an interface used by MustExecContext and LoadFileContext type ExecerContext interface { ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) } // ExtContext is a union interface which can bind, query, and exec, with Context // used by NamedQueryContext and NamedExecContext. type ExtContext interface { binder QueryerContext ExecerContext } // SelectContext executes a query using the provided Queryer, and StructScans // each row into dest, which must be a slice. If the slice elements are // scannable, then the result set must have only one column. Otherwise, // StructScan is used. The *sql.Rows are closed automatically. // Any placeholder parameters are replaced with supplied args. func SelectContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error { rows, err := q.QueryxContext(ctx, query, args...) if err != nil { return err } // if something happens here, we want to make sure the rows are Closed defer rows.Close() return scanAll(rows, dest, false) } // PreparexContext prepares a statement. // // The provided context is used for the preparation of the statement, not for // the execution of the statement. func PreparexContext(ctx context.Context, p PreparerContext, query string) (*Stmt, error) { s, err := p.PrepareContext(ctx, query) if err != nil { return nil, err } return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err } // GetContext does a QueryRow using the provided Queryer, and scans the // resulting row to dest. If dest is scannable, the result must only have one // column. Otherwise, StructScan is used. Get will return sql.ErrNoRows like // row.Scan would. Any placeholder parameters are replaced with supplied args. // An error is returned if the result set is empty. func GetContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error { r := q.QueryRowxContext(ctx, query, args...) return r.scanAny(dest, false) } // LoadFileContext exec's every statement in a file (as a single call to Exec). // LoadFileContext may return a nil *sql.Result if errors are encountered // locating or reading the file at path. LoadFile reads the entire file into // memory, so it is not suitable for loading large data dumps, but can be useful // for initializing schemas or loading indexes. // // FIXME: this does not really work with multi-statement files for mattn/go-sqlite3 // or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting // this by requiring something with DriverName() and then attempting to split the // queries will be difficult to get right, and its current driver-specific behavior // is deemed at least not complex in its incorrectness. func LoadFileContext(ctx context.Context, e ExecerContext, path string) (*sql.Result, error) { realpath, err := filepath.Abs(path) if err != nil { return nil, err } contents, err := ioutil.ReadFile(realpath) if err != nil { return nil, err } res, err := e.ExecContext(ctx, string(contents)) return &res, err } // MustExecContext execs the query using e and panics if there was an error. // Any placeholder parameters are replaced with supplied args. func MustExecContext(ctx context.Context, e ExecerContext, query string, args ...interface{}) sql.Result { res, err := e.ExecContext(ctx, query, args...) 
if err != nil { panic(err) } return res } // PrepareNamedContext returns an sqlx.NamedStmt func (db *DB) PrepareNamedContext(ctx context.Context, query string) (*NamedStmt, error) { return prepareNamedContext(ctx, db, query) } // NamedQueryContext using this DB. // Any named placeholder parameters are replaced with fields from arg. func (db *DB) NamedQueryContext(ctx context.Context, query string, arg interface{}) (*Rows, error) { return NamedQueryContext(ctx, db, query, arg) } // NamedExecContext using this DB. // Any named placeholder parameters are replaced with fields from arg. func (db *DB) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) { return NamedExecContext(ctx, db, query, arg) } // SelectContext using this DB. // Any placeholder parameters are replaced with supplied args. func (db *DB) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { return SelectContext(ctx, db, dest, query, args...) } // GetContext using this DB. // Any placeholder parameters are replaced with supplied args. // An error is returned if the result set is empty. func (db *DB) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { return GetContext(ctx, db, dest, query, args...) } // PreparexContext returns an sqlx.Stmt instead of a sql.Stmt. // // The provided context is used for the preparation of the statement, not for // the execution of the statement. func (db *DB) PreparexContext(ctx context.Context, query string) (*Stmt, error) { return PreparexContext(ctx, db, query) } // QueryxContext queries the database and returns an *sqlx.Rows. // Any placeholder parameters are replaced with supplied args. func (db *DB) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { r, err := db.DB.QueryContext(ctx, query, args...) if err != nil { return nil, err } return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err } // QueryRowxContext queries the database and returns an *sqlx.Row. // Any placeholder parameters are replaced with supplied args. func (db *DB) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { rows, err := db.DB.QueryContext(ctx, query, args...) return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper} } // MustBeginTx starts a transaction, and panics on error. Returns an *sqlx.Tx instead // of an *sql.Tx. // // The provided context is used until the transaction is committed or rolled // back. If the context is canceled, the sql package will roll back the // transaction. Tx.Commit will return an error if the context provided to // MustBeginContext is canceled. func (db *DB) MustBeginTx(ctx context.Context, opts *sql.TxOptions) *Tx { tx, err := db.BeginTxx(ctx, opts) if err != nil { panic(err) } return tx } // MustExecContext (panic) runs MustExec using this database. // Any placeholder parameters are replaced with supplied args. func (db *DB) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result { return MustExecContext(ctx, db, query, args...) } // BeginTxx begins a transaction and returns an *sqlx.Tx instead of an // *sql.Tx. // // The provided context is used until the transaction is committed or rolled // back. If the context is canceled, the sql package will roll back the // transaction. Tx.Commit will return an error if the context provided to // BeginxContext is canceled. 
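//
// A hedged usage sketch:
//
//	tx, err := db.BeginTxx(ctx, nil)
//	if err != nil {
//		return err
//	}
//	defer tx.Rollback()
//	if _, err := tx.ExecContext(ctx, "UPDATE t SET n = n + 1"); err != nil {
//		return err
//	}
//	return tx.Commit()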
func (db *DB) BeginTxx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { tx, err := db.DB.BeginTx(ctx, opts) if err != nil { return nil, err } return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err } // Connx returns an *sqlx.Conn instead of an *sql.Conn. func (db *DB) Connx(ctx context.Context) (*Conn, error) { conn, err := db.DB.Conn(ctx) if err != nil { return nil, err } return &Conn{Conn: conn, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, nil } // BeginTxx begins a transaction and returns an *sqlx.Tx instead of an // *sql.Tx. // // The provided context is used until the transaction is committed or rolled // back. If the context is canceled, the sql package will roll back the // transaction. Tx.Commit will return an error if the context provided to // BeginxContext is canceled. func (c *Conn) BeginTxx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { tx, err := c.Conn.BeginTx(ctx, opts) if err != nil { return nil, err } return &Tx{Tx: tx, driverName: c.driverName, unsafe: c.unsafe, Mapper: c.Mapper}, err } // SelectContext using this Conn. // Any placeholder parameters are replaced with supplied args. func (c *Conn) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { return SelectContext(ctx, c, dest, query, args...) } // GetContext using this Conn. // Any placeholder parameters are replaced with supplied args. // An error is returned if the result set is empty. func (c *Conn) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { return GetContext(ctx, c, dest, query, args...) } // PreparexContext returns an sqlx.Stmt instead of a sql.Stmt. // // The provided context is used for the preparation of the statement, not for // the execution of the statement. func (c *Conn) PreparexContext(ctx context.Context, query string) (*Stmt, error) { return PreparexContext(ctx, c, query) } // QueryxContext queries the database and returns an *sqlx.Rows. // Any placeholder parameters are replaced with supplied args. func (c *Conn) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { r, err := c.Conn.QueryContext(ctx, query, args...) if err != nil { return nil, err } return &Rows{Rows: r, unsafe: c.unsafe, Mapper: c.Mapper}, err } // QueryRowxContext queries the database and returns an *sqlx.Row. // Any placeholder parameters are replaced with supplied args. func (c *Conn) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { rows, err := c.Conn.QueryContext(ctx, query, args...) return &Row{rows: rows, err: err, unsafe: c.unsafe, Mapper: c.Mapper} } // Rebind a query within a Conn's bindvar type. func (c *Conn) Rebind(query string) string { return Rebind(BindType(c.driverName), query) } // StmtxContext returns a version of the prepared statement which runs within a // transaction. Provided stmt can be either *sql.Stmt or *sqlx.Stmt. func (tx *Tx) StmtxContext(ctx context.Context, stmt interface{}) *Stmt { var s *sql.Stmt switch v := stmt.(type) { case Stmt: s = v.Stmt case *Stmt: s = v.Stmt case *sql.Stmt: s = v default: panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type())) } return &Stmt{Stmt: tx.StmtContext(ctx, s), Mapper: tx.Mapper} } // NamedStmtContext returns a version of the prepared statement which runs // within a transaction. 
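//
// A hedged usage sketch (mirroring named_context_test.go):
//
//	tx := db.MustBeginTx(ctx, nil)
//	txns := tx.NamedStmtContext(ctx, ns) // ns is a *NamedStmt prepared earlier
//	_, err := txns.ExecContext(ctx, person)
//	// ... then tx.Commit() or tx.Rollback()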
func (tx *Tx) NamedStmtContext(ctx context.Context, stmt *NamedStmt) *NamedStmt { return &NamedStmt{ QueryString: stmt.QueryString, Params: stmt.Params, Stmt: tx.StmtxContext(ctx, stmt.Stmt), } } // PreparexContext returns an sqlx.Stmt instead of a sql.Stmt. // // The provided context is used for the preparation of the statement, not for // the execution of the statement. func (tx *Tx) PreparexContext(ctx context.Context, query string) (*Stmt, error) { return PreparexContext(ctx, tx, query) } // PrepareNamedContext returns an sqlx.NamedStmt func (tx *Tx) PrepareNamedContext(ctx context.Context, query string) (*NamedStmt, error) { return prepareNamedContext(ctx, tx, query) } // MustExecContext runs MustExecContext within a transaction. // Any placeholder parameters are replaced with supplied args. func (tx *Tx) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result { return MustExecContext(ctx, tx, query, args...) } // QueryxContext within a transaction and context. // Any placeholder parameters are replaced with supplied args. func (tx *Tx) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { r, err := tx.Tx.QueryContext(ctx, query, args...) if err != nil { return nil, err } return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err } // SelectContext within a transaction and context. // Any placeholder parameters are replaced with supplied args. func (tx *Tx) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { return SelectContext(ctx, tx, dest, query, args...) } // GetContext within a transaction and context. // Any placeholder parameters are replaced with supplied args. // An error is returned if the result set is empty. func (tx *Tx) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { return GetContext(ctx, tx, dest, query, args...) } // QueryRowxContext within a transaction and context. // Any placeholder parameters are replaced with supplied args. func (tx *Tx) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { rows, err := tx.Tx.QueryContext(ctx, query, args...) return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper} } // NamedExecContext using this Tx. // Any named placeholder parameters are replaced with fields from arg. func (tx *Tx) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) { return NamedExecContext(ctx, tx, query, arg) } // SelectContext using the prepared statement. // Any placeholder parameters are replaced with supplied args. func (s *Stmt) SelectContext(ctx context.Context, dest interface{}, args ...interface{}) error { return SelectContext(ctx, &qStmt{s}, dest, "", args...) } // GetContext using the prepared statement. // Any placeholder parameters are replaced with supplied args. // An error is returned if the result set is empty. func (s *Stmt) GetContext(ctx context.Context, dest interface{}, args ...interface{}) error { return GetContext(ctx, &qStmt{s}, dest, "", args...) } // MustExecContext (panic) using this statement. Note that the query portion of // the error output will be blank, as Stmt does not expose its query. // Any placeholder parameters are replaced with supplied args. func (s *Stmt) MustExecContext(ctx context.Context, args ...interface{}) sql.Result { return MustExecContext(ctx, &qStmt{s}, "", args...) } // QueryRowxContext using this statement. // Any placeholder parameters are replaced with supplied args. 
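//
// A hedged usage sketch (table and struct are illustrative only):
//
//	stmt, _ := db.PreparexContext(ctx, db.Rebind("SELECT * FROM place WHERE telcode = ?"))
//	var p Place
//	err := stmt.QueryRowxContext(ctx, 852).StructScan(&p)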
func (s *Stmt) QueryRowxContext(ctx context.Context, args ...interface{}) *Row { qs := &qStmt{s} return qs.QueryRowxContext(ctx, "", args...) } // QueryxContext using this statement. // Any placeholder parameters are replaced with supplied args. func (s *Stmt) QueryxContext(ctx context.Context, args ...interface{}) (*Rows, error) { qs := &qStmt{s} return qs.QueryxContext(ctx, "", args...) } func (q *qStmt) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) { return q.Stmt.QueryContext(ctx, args...) } func (q *qStmt) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { r, err := q.Stmt.QueryContext(ctx, args...) if err != nil { return nil, err } return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err } func (q *qStmt) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { rows, err := q.Stmt.QueryContext(ctx, args...) return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper} } func (q *qStmt) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { return q.Stmt.ExecContext(ctx, args...) } dependencies/pkg/mod/github.com/!vivid!cortex/0000775000000000000000000000000015024302472020252 5ustar rootrootdependencies/pkg/mod/github.com/!vivid!cortex/ewma@v1.2.0/0000755000000000000000000000000015024302472022046 5ustar rootrootdependencies/pkg/mod/github.com/!vivid!cortex/ewma@v1.2.0/LICENSE0000644000000000000000000000206015024302472023051 0ustar rootrootThe MIT License Copyright (c) 2013 VividCortex Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
dependencies/pkg/mod/github.com/!vivid!cortex/ewma@v1.2.0/.whitesource0000644000000000000000000000010615024302472024405 0ustar rootroot{ "settingsInheritedFrom": "VividCortex/whitesource-config@master" }dependencies/pkg/mod/github.com/!vivid!cortex/ewma@v1.2.0/go.mod0000644000000000000000000000005415024302472023153 0ustar rootrootmodule github.com/VividCortex/ewma go 1.12 dependencies/pkg/mod/github.com/!vivid!cortex/ewma@v1.2.0/README.md0000644000000000000000000001467515024302472023342 0ustar rootroot# EWMA [![GoDoc](https://godoc.org/github.com/VividCortex/ewma?status.svg)](https://godoc.org/github.com/VividCortex/ewma) ![build](https://github.com/VividCortex/ewma/workflows/build/badge.svg) [![codecov](https://codecov.io/gh/VividCortex/ewma/branch/master/graph/badge.svg)](https://codecov.io/gh/VividCortex/ewma) This repo provides Exponentially Weighted Moving Average algorithms, or EWMAs for short, [based on our Quantifying Abnormal Behavior talk](https://vividcortex.com/blog/2013/07/23/a-fast-go-library-for-exponential-moving-averages/). ### Exponentially Weighted Moving Average An exponentially weighted moving average is a way to continuously compute a type of average for a series of numbers, as the numbers arrive. After a value in the series is added to the average, its weight in the average decreases exponentially over time. This biases the average towards more recent data. EWMAs are useful for several reasons, chiefly their inexpensive computational and memory cost, as well as the fact that they represent the recent central tendency of the series of values. The EWMA algorithm requires a decay factor, alpha. The larger the alpha, the more the average is biased towards recent history. The alpha must be between 0 and 1, and is typically a fairly small number, such as 0.04. We will discuss the choice of alpha later. The algorithm works thus, in pseudocode: 1. Multiply the next number in the series by alpha. 2. Multiply the current value of the average by 1 minus alpha. 3. Add the result of steps 1 and 2, and store it as the new current value of the average. 4. Repeat for each number in the series. There are special-case behaviors for how to initialize the current value, and these vary between implementations. One approach is to start with the first value in the series; another is to average the first 10 or so values in the series using an arithmetic average, and then begin the incremental updating of the average. Each method has pros and cons. It may help to look at it pictorially. Suppose the series has five numbers, and we choose alpha to be 0.50 for simplicity. Here's the series, with numbers in the neighborhood of 300. ![Data Series](https://user-images.githubusercontent.com/279875/28242350-463289a2-6977-11e7-88ca-fd778ccef1f0.png) Now let's take the moving average of those numbers. First we set the average to the value of the first number. ![EWMA Step 1](https://user-images.githubusercontent.com/279875/28242353-464c96bc-6977-11e7-9981-dc4e0789c7ba.png) Next we multiply the next number by alpha, multiply the current value by 1-alpha, and add them to generate a new value. ![EWMA Step 2](https://user-images.githubusercontent.com/279875/28242351-464abefa-6977-11e7-95d0-43900f29bef2.png) This continues until we are done. 
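The update rule in steps 1-3 above can also be written directly in Go. This is a minimal sketch of the idea only (a hypothetical helper, not this package's API):

```go
// ewmaUpdate applies one EWMA step: new = alpha*sample + (1-alpha)*old.
func ewmaUpdate(old, sample, alpha float64) float64 {
	return alpha*sample + (1-alpha)*old
}
```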
![EWMA Step N](https://user-images.githubusercontent.com/279875/28242352-464c58f0-6977-11e7-8cd0-e01e4efaac7f.png) Notice how each of the values in the series decays by half each time a new value is added, and the top of the bars in the lower portion of the image represents the size of the moving average. It is a smoothed, or low-pass, average of the original series. For further reading, see [Exponentially weighted moving average](http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average) on wikipedia. ### Choosing Alpha Consider a fixed-size sliding-window moving average (not an exponentially weighted moving average) that averages over the previous N samples. What is the average age of each sample? It is N/2. Now suppose that you wish to construct a EWMA whose samples have the same average age. The formula to compute the alpha required for this is: alpha = 2/(N+1). Proof is in the book "Production and Operations Analysis" by Steven Nahmias. So, for example, if you have a time-series with samples once per second, and you want to get the moving average over the previous minute, you should use an alpha of .032786885. This, by the way, is the constant alpha used for this repository's SimpleEWMA. ### Implementations This repository contains two implementations of the EWMA algorithm, with different properties. The implementations all conform to the MovingAverage interface, and the constructor returns that type. Current implementations assume an implicit time interval of 1.0 between every sample added. That is, the passage of time is treated as though it's the same as the arrival of samples. If you need time-based decay when samples are not arriving precisely at set intervals, then this package will not support your needs at present. #### SimpleEWMA A SimpleEWMA is designed for low CPU and memory consumption. It **will** have different behavior than the VariableEWMA for multiple reasons. It has no warm-up period and it uses a constant decay. These properties let it use less memory. It will also behave differently when it's equal to zero, which is assumed to mean uninitialized, so if a value is likely to actually become zero over time, then any non-zero value will cause a sharp jump instead of a small change. #### VariableEWMA Unlike SimpleEWMA, this supports a custom age which must be stored, and thus uses more memory. It also has a "warmup" time when you start adding values to it. It will report a value of 0.0 until you have added the required number of samples to it. It uses some memory to store the number of samples added to it. As a result it uses a little over twice the memory of SimpleEWMA. ## Usage ### API Documentation View the GoDoc generated documentation [here](http://godoc.org/github.com/VividCortex/ewma). ```go package main import "github.com/VividCortex/ewma" func main() { samples := [100]float64{ 4599, 5711, 4746, 4621, 5037, 4218, 4925, 4281, 5207, 5203, 5594, 5149, } e := ewma.NewMovingAverage() //=> Returns a SimpleEWMA if called without params a := ewma.NewMovingAverage(5) //=> returns a VariableEWMA with a decay of 2 / (5 + 1) for _, f := range samples { e.Add(f) a.Add(f) } e.Value() //=> 13.577404704631077 a.Value() //=> 1.5806140565521463e-12 } ``` ## Contributing We only accept pull requests for minor fixes or improvements. This includes: * Small bug fixes * Typos * Documentation or comments Please open issues to discuss new features. 
Pull requests for new features will be rejected, so we recommend forking the repository and making changes in your fork for your use case. ## License This repository is Copyright (c) 2013 VividCortex, Inc. All rights reserved. It is licensed under the MIT license. Please see the LICENSE file for applicable license terms. dependencies/pkg/mod/github.com/!vivid!cortex/ewma@v1.2.0/ewma_test.go0000644000000000000000000000517015024302472024370 0ustar rootrootpackage ewma // Copyright (c) 2013 VividCortex, Inc. All rights reserved. // Please see the LICENSE file for applicable license terms. import ( "math" "testing" ) const testMargin = 0.00000001 var samples = [100]float64{ 4599, 5711, 4746, 4621, 5037, 4218, 4925, 4281, 5207, 5203, 5594, 5149, 4948, 4994, 6056, 4417, 4973, 4714, 4964, 5280, 5074, 4913, 4119, 4522, 4631, 4341, 4909, 4750, 4663, 5167, 3683, 4964, 5151, 4892, 4171, 5097, 3546, 4144, 4551, 6557, 4234, 5026, 5220, 4144, 5547, 4747, 4732, 5327, 5442, 4176, 4907, 3570, 4684, 4161, 5206, 4952, 4317, 4819, 4668, 4603, 4885, 4645, 4401, 4362, 5035, 3954, 4738, 4545, 5433, 6326, 5927, 4983, 5364, 4598, 5071, 5231, 5250, 4621, 4269, 3953, 3308, 3623, 5264, 5322, 5395, 4753, 4936, 5315, 5243, 5060, 4989, 4921, 4480, 3426, 3687, 4220, 3197, 5139, 6101, 5279, } func withinMargin(a, b float64) bool { return math.Abs(a-b) <= testMargin } func TestSimpleEWMA(t *testing.T) { var e SimpleEWMA for _, f := range samples { e.Add(f) } if !withinMargin(e.Value(), 4734.500946466118) { t.Errorf("e.Value() is %v, wanted %v", e.Value(), 4734.500946466118) } e.Set(1.0) if e.Value() != 1.0 { t.Errorf("e.Value() is %v", e.Value()) } } func TestVariableEWMA(t *testing.T) { e := NewMovingAverage(30) for _, f := range samples { e.Add(f) } if !withinMargin(e.Value(), 4734.500946466118) { t.Errorf("e.Value() is %v, wanted %v", e.Value(), 4734.500946466118) } e.Set(1.0) if e.Value() != 1.0 { t.Errorf("e.Value() is %v", e.Value()) } } func TestVariableEWMA2(t *testing.T) { e := NewMovingAverage(5) for _, f := range samples { e.Add(f) } if !withinMargin(e.Value(), 5015.397367486725) { t.Errorf("e.Value() is %v, wanted %v", e.Value(), 5015.397367486725) } } func TestVariableEWMAWarmup(t *testing.T) { e := NewMovingAverage(5) for i, f := range samples { e.Add(f) // all values returned during warmup should be 0.0 if uint8(i) < WARMUP_SAMPLES { if e.Value() != 0.0 { t.Errorf("e.Value() is %v, expected %v", e.Value(), 0.0) } } } e = NewMovingAverage(5) e.Set(5) e.Add(1) if e.Value() >= 5 { t.Errorf("e.Value() is %v, expected it to decay towards 0", e.Value()) } } func TestVariableEWMAWarmup2(t *testing.T) { e := NewMovingAverage(5) testSamples := [12]float64{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 10000, 1} for i, f := range testSamples { e.Add(f) // all values returned during warmup should be 0.0 if uint8(i) < WARMUP_SAMPLES { if e.Value() != 0.0 { t.Errorf("e.Value() is %v, expected %v", e.Value(), 0.0) } } } if val := e.Value(); val == 1.0 { t.Errorf("e.Value() is expected to be greater than %v", 1.0) } } dependencies/pkg/mod/github.com/!vivid!cortex/ewma@v1.2.0/ewma.go0000644000000000000000000001050215024302472023324 0ustar rootroot// Package ewma implements exponentially weighted moving averages. package ewma // Copyright (c) 2013 VividCortex, Inc. All rights reserved. // Please see the LICENSE file for applicable license terms. const ( // By default, we average over a one-minute period, which means the average // age of the metrics in the period is 30 seconds. 
AVG_METRIC_AGE float64 = 30.0 // The formula for computing the decay factor from the average age comes // from "Production and Operations Analysis" by Steven Nahmias. DECAY float64 = 2 / (float64(AVG_METRIC_AGE) + 1) // For best results, the moving average should not be initialized to the // samples it sees immediately. The book "Production and Operations // Analysis" by Steven Nahmias suggests initializing the moving average to // the mean of the first 10 samples. Until the VariableEwma has seen this // many samples, it is not "ready" to be queried for the value of the // moving average. This adds some memory cost. WARMUP_SAMPLES uint8 = 10 ) // MovingAverage is the interface that computes a moving average over a time- // series stream of numbers. The average may be over a window or exponentially // decaying. type MovingAverage interface { Add(float64) Value() float64 Set(float64) } // NewMovingAverage constructs a MovingAverage that computes an average with the // desired characteristics in the moving window or exponential decay. If no // age is given, it constructs a default exponentially weighted implementation // that consumes minimal memory. The age is related to the decay factor alpha // by the formula given for the DECAY constant. It signifies the average age // of the samples as time goes to infinity. func NewMovingAverage(age ...float64) MovingAverage { if len(age) == 0 || age[0] == AVG_METRIC_AGE { return new(SimpleEWMA) } return &VariableEWMA{ decay: 2 / (age[0] + 1), } } // A SimpleEWMA represents the exponentially weighted moving average of a // series of numbers. It WILL have different behavior than the VariableEWMA // for multiple reasons. It has no warm-up period and it uses a constant // decay. These properties let it use less memory. It will also behave // differently when it's equal to zero, which is assumed to mean // uninitialized, so if a value is likely to actually become zero over time, // then any non-zero value will cause a sharp jump instead of a small change. // However, note that this takes a long time, and the value may just // decays to a stable value that's close to zero, but which won't be mistaken // for uninitialized. See http://play.golang.org/p/litxBDr_RC for example. type SimpleEWMA struct { // The current value of the average. After adding with Add(), this is // updated to reflect the average of all values seen thus far. value float64 } // Add adds a value to the series and updates the moving average. func (e *SimpleEWMA) Add(value float64) { if e.value == 0 { // this is a proxy for "uninitialized" e.value = value } else { e.value = (value * DECAY) + (e.value * (1 - DECAY)) } } // Value returns the current value of the moving average. func (e *SimpleEWMA) Value() float64 { return e.value } // Set sets the EWMA's value. func (e *SimpleEWMA) Set(value float64) { e.value = value } // VariableEWMA represents the exponentially weighted moving average of a series of // numbers. Unlike SimpleEWMA, it supports a custom age, and thus uses more memory. type VariableEWMA struct { // The multiplier factor by which the previous samples decay. decay float64 // The current value of the average. value float64 // The number of samples added to this instance. count uint8 } // Add adds a value to the series and updates the moving average. 
func (e *VariableEWMA) Add(value float64) { switch { case e.count < WARMUP_SAMPLES: e.count++ e.value += value case e.count == WARMUP_SAMPLES: e.count++ e.value = e.value / float64(WARMUP_SAMPLES) e.value = (value * e.decay) + (e.value * (1 - e.decay)) default: e.value = (value * e.decay) + (e.value * (1 - e.decay)) } } // Value returns the current value of the average, or 0.0 if the series hasn't // warmed up yet. func (e *VariableEWMA) Value() float64 { if e.count <= WARMUP_SAMPLES { return 0.0 } return e.value } // Set sets the EWMA's value. func (e *VariableEWMA) Set(value float64) { e.value = value if e.count <= WARMUP_SAMPLES { e.count = WARMUP_SAMPLES + 1 } } dependencies/pkg/mod/github.com/!vivid!cortex/ewma@v1.2.0/.gitignore0000644000000000000000000000003615024302472024035 0ustar rootroot.DS_Store .*.sw? /coverage.txtdependencies/pkg/mod/github.com/!vivid!cortex/ewma@v1.2.0/.github/0000755000000000000000000000000015024302472023406 5ustar rootrootdependencies/pkg/mod/github.com/!vivid!cortex/ewma@v1.2.0/.github/PULL_REQUEST_TEMPLATE.md0000644000000000000000000000056015024302472027210 0ustar rootrootBefore you create a pull request, please consider: We only accept pull requests for minor fixes or improvements. This includes: * Small bug fixes * Typos * Documentation or comments Please open issues to discuss new features. Pull requests for new features will be rejected, so we recommend forking the repository and making changes in your fork for your use case. dependencies/pkg/mod/github.com/!vivid!cortex/ewma@v1.2.0/.github/ISSUE_TEMPLATE.md0000644000000000000000000000055015024302472026113 0ustar rootrootBefore you file an issue, please consider: We only accept pull requests for minor fixes or improvements. This includes: * Small bug fixes * Typos * Documentation or comments Please open issues to discuss new features. Pull requests for new features will be rejected, so we recommend forking the repository and making changes in your fork for your use case. dependencies/pkg/mod/github.com/!vivid!cortex/ewma@v1.2.0/.github/workflows/0000755000000000000000000000000015024302472025443 5ustar rootrootdependencies/pkg/mod/github.com/!vivid!cortex/ewma@v1.2.0/.github/workflows/build.yml0000644000000000000000000000225515024302472027271 0ustar rootrootname: build on: push: branches: - master paths-ignore: - .github/** - .gitignore - .whitesource - codecov.yml - README.md pull_request: paths-ignore: - .github/** - .gitignore - .whitesource - codecov.yml - README.md jobs: build: runs-on: ubuntu-latest strategy: fail-fast: false matrix: include: - go: 1.15 build-with: true - go: 1.16 build-with: false continue-on-error: ${{ matrix.build-with == false }} name: Build with ${{ matrix.go }} env: GO111MODULE: on steps: - name: Set up Go uses: actions/setup-go@v1 with: go-version: ${{ matrix.go }} - name: Checkout code uses: actions/checkout@v2 - name: Vet run: go vet ./... - name: Test run: go test -vet=off -race -coverprofile=coverage.txt -covermode=atomic ./... 
- name: Upload code coverage report if: matrix.build-with == true env: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} run: bash <(curl -s https://raw.githubusercontent.com/VividCortex/codecov-bash/master/codecov) dependencies/pkg/mod/github.com/!vivid!cortex/ewma@v1.2.0/codecov.yml0000644000000000000000000000012615024302472024212 0ustar rootrootcoverage: status: project: default: threshold: 15% patch: off dependencies/pkg/mod/github.com/cespare/0000775000000000000000000000000015024302470017222 5ustar rootrootdependencies/pkg/mod/github.com/cespare/xxhash/0000775000000000000000000000000015024302470020525 5ustar rootrootdependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.3.0/0000755000000000000000000000000015024302470021721 5ustar rootrootdependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.3.0/xxhash_amd64.s0000644000000000000000000000673615024302470024417 0ustar rootroot//go:build !appengine && gc && !purego // +build !appengine // +build gc // +build !purego #include "textflag.h" // Registers: #define h AX #define d AX #define p SI // pointer to advance through b #define n DX #define end BX // loop end #define v1 R8 #define v2 R9 #define v3 R10 #define v4 R11 #define x R12 #define prime1 R13 #define prime2 R14 #define prime4 DI #define round(acc, x) \ IMULQ prime2, x \ ADDQ x, acc \ ROLQ $31, acc \ IMULQ prime1, acc // round0 performs the operation x = round(0, x). #define round0(x) \ IMULQ prime2, x \ ROLQ $31, x \ IMULQ prime1, x // mergeRound applies a merge round on the two registers acc and x. // It assumes that prime1, prime2, and prime4 have been loaded. #define mergeRound(acc, x) \ round0(x) \ XORQ x, acc \ IMULQ prime1, acc \ ADDQ prime4, acc // blockLoop processes as many 32-byte blocks as possible, // updating v1, v2, v3, and v4. It assumes that there is at least one block // to process. #define blockLoop() \ loop: \ MOVQ +0(p), x \ round(v1, x) \ MOVQ +8(p), x \ round(v2, x) \ MOVQ +16(p), x \ round(v3, x) \ MOVQ +24(p), x \ round(v4, x) \ ADDQ $32, p \ CMPQ p, end \ JLE loop // func Sum64(b []byte) uint64 TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 // Load fixed primes. MOVQ ·primes+0(SB), prime1 MOVQ ·primes+8(SB), prime2 MOVQ ·primes+24(SB), prime4 // Load slice. MOVQ b_base+0(FP), p MOVQ b_len+8(FP), n LEAQ (p)(n*1), end // The first loop limit will be len(b)-32. SUBQ $32, end // Check whether we have at least one block. CMPQ n, $32 JLT noBlocks // Set up initial state (v1, v2, v3, v4). MOVQ prime1, v1 ADDQ prime2, v1 MOVQ prime2, v2 XORQ v3, v3 XORQ v4, v4 SUBQ prime1, v4 blockLoop() MOVQ v1, h ROLQ $1, h MOVQ v2, x ROLQ $7, x ADDQ x, h MOVQ v3, x ROLQ $12, x ADDQ x, h MOVQ v4, x ROLQ $18, x ADDQ x, h mergeRound(h, v1) mergeRound(h, v2) mergeRound(h, v3) mergeRound(h, v4) JMP afterBlocks noBlocks: MOVQ ·primes+32(SB), h afterBlocks: ADDQ n, h ADDQ $24, end CMPQ p, end JG try4 loop8: MOVQ (p), x ADDQ $8, p round0(x) XORQ x, h ROLQ $27, h IMULQ prime1, h ADDQ prime4, h CMPQ p, end JLE loop8 try4: ADDQ $4, end CMPQ p, end JG try1 MOVL (p), x ADDQ $4, p IMULQ prime1, x XORQ x, h ROLQ $23, h IMULQ prime2, h ADDQ ·primes+16(SB), h try1: ADDQ $4, end CMPQ p, end JGE finalize loop1: MOVBQZX (p), x ADDQ $1, p IMULQ ·primes+32(SB), x XORQ x, h ROLQ $11, h IMULQ prime1, h CMPQ p, end JL loop1 finalize: MOVQ h, x SHRQ $33, x XORQ x, h IMULQ prime2, h MOVQ h, x SHRQ $29, x XORQ x, h IMULQ ·primes+16(SB), h MOVQ h, x SHRQ $32, x XORQ x, h MOVQ h, ret+24(FP) RET // func writeBlocks(d *Digest, b []byte) int TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 // Load fixed primes needed for round. 
MOVQ ·primes+0(SB), prime1 MOVQ ·primes+8(SB), prime2 // Load slice. MOVQ b_base+8(FP), p MOVQ b_len+16(FP), n LEAQ (p)(n*1), end SUBQ $32, end // Load vN from d. MOVQ s+0(FP), d MOVQ 0(d), v1 MOVQ 8(d), v2 MOVQ 16(d), v3 MOVQ 24(d), v4 // We don't need to check the loop condition here; this function is // always called with at least one block of data to process. blockLoop() // Copy vN back to d. MOVQ v1, 0(d) MOVQ v2, 8(d) MOVQ v3, 16(d) MOVQ v4, 24(d) // The number of bytes written is p minus the old base pointer. SUBQ b_base+8(FP), p MOVQ p, ret+32(FP) RET dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.3.0/go.mod0000644000000000000000000000005515024302470023027 0ustar rootrootmodule github.com/cespare/xxhash/v2 go 1.11 dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.3.0/xxhash_unsafe.go0000644000000000000000000000405715024302470025122 0ustar rootroot//go:build !appengine // +build !appengine // This file encapsulates usage of unsafe. // xxhash_safe.go contains the safe implementations. package xxhash import ( "unsafe" ) // In the future it's possible that compiler optimizations will make these // XxxString functions unnecessary by realizing that calls such as // Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205. // If that happens, even if we keep these functions they can be replaced with // the trivial safe code. // NOTE: The usual way of doing an unsafe string-to-[]byte conversion is: // // var b []byte // bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) // bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data // bh.Len = len(s) // bh.Cap = len(s) // // Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough // weight to this sequence of expressions that any function that uses it will // not be inlined. Instead, the functions below use a different unsafe // conversion designed to minimize the inliner weight and allow both to be // inlined. There is also a test (TestInlining) which verifies that these are // inlined. // // See https://github.com/golang/go/issues/42739 for discussion. // Sum64String computes the 64-bit xxHash digest of s with a zero seed. // It may be faster than Sum64([]byte(s)) by avoiding a copy. func Sum64String(s string) uint64 { b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})) return Sum64(b) } // WriteString adds more data to d. It always returns len(s), nil. // It may be faster than Write([]byte(s)) by avoiding a copy. func (d *Digest) WriteString(s string) (n int, err error) { d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))) // d.Write always returns len(s), nil. // Ignoring the return output and returning these fixed values buys a // savings of 6 in the inliner's cost model. return len(s), nil } // sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout // of the first two words is the same as the layout of a string. type sliceHeader struct { s string cap int } dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.3.0/README.md0000644000000000000000000000465515024302470023212 0ustar rootroot# xxhash [![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2) [![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml) xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a high-quality hashing algorithm that is much faster than anything in the Go standard library. 
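For orientation, here is a minimal usage sketch (one-shot hashing of a byte slice, the copy-free string variant, and streaming through a `Digest`), using only the functions summarized in the API listing that follows:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing of a byte slice, and the string variant that avoids a copy.
	fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello, xxhash")))
	fmt.Printf("%016x\n", xxhash.Sum64String("hello, xxhash"))

	// Streaming: Digest implements hash.Hash64, so input can be written
	// incrementally and the 64-bit sum read out at the end.
	d := xxhash.New()
	d.Write([]byte("hello, "))
	d.WriteString("xxhash")
	fmt.Printf("%016x\n", d.Sum64())
}
```

Since the streamed writes concatenate to the same input as the one-shot calls, all three lines print the same digest.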
This package provides a straightforward API: ``` func Sum64(b []byte) uint64 func Sum64String(s string) uint64 type Digest struct{ ... } func New() *Digest ``` The `Digest` type implements hash.Hash64. Its key methods are: ``` func (*Digest) Write([]byte) (int, error) func (*Digest) WriteString(string) (int, error) func (*Digest) Sum64() uint64 ``` The package is written with optimized pure Go and also contains even faster assembly implementations for amd64 and arm64. If desired, the `purego` build tag opts into using the Go code even on those architectures. [xxHash]: http://cyan4973.github.io/xxHash/ ## Compatibility This package is in a module and the latest code is in version 2 of the module. You need a version of Go with at least "minimal module compatibility" to use github.com/cespare/xxhash/v2: * 1.9.7+ for Go 1.9 * 1.10.3+ for Go 1.10 * Go 1.11 or later I recommend using the latest release of Go. ## Benchmarks Here are some quick benchmarks comparing the pure-Go and assembly implementations of Sum64. | input size | purego | asm | | ---------- | --------- | --------- | | 4 B | 1.3 GB/s | 1.2 GB/s | | 16 B | 2.9 GB/s | 3.5 GB/s | | 100 B | 6.9 GB/s | 8.1 GB/s | | 4 KB | 11.7 GB/s | 16.7 GB/s | | 10 MB | 12.0 GB/s | 17.3 GB/s | These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C CPU using the following commands under Go 1.19.2: ``` benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') ``` ## Projects using this package - [InfluxDB](https://github.com/influxdata/influxdb) - [Prometheus](https://github.com/prometheus/prometheus) - [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) - [FreeCache](https://github.com/coocood/freecache) - [FastCache](https://github.com/VictoriaMetrics/fastcache) - [Ristretto](https://github.com/dgraph-io/ristretto) - [Badger](https://github.com/dgraph-io/badger) dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.3.0/bench_test.go0000644000000000000000000000235115024302470024367 0ustar rootrootpackage xxhash import ( "strings" "testing" ) var benchmarks = []struct { name string n int64 }{ {"4B", 4}, {"16B", 16}, {"100B", 100}, {"4KB", 4e3}, {"10MB", 10e6}, } func BenchmarkSum64(b *testing.B) { for _, bb := range benchmarks { in := make([]byte, bb.n) for i := range in { in[i] = byte(i) } b.Run(bb.name, func(b *testing.B) { b.SetBytes(bb.n) for i := 0; i < b.N; i++ { _ = Sum64(in) } }) } } func BenchmarkSum64String(b *testing.B) { for _, bb := range benchmarks { s := strings.Repeat("a", int(bb.n)) b.Run(bb.name, func(b *testing.B) { b.SetBytes(bb.n) for i := 0; i < b.N; i++ { _ = Sum64String(s) } }) } } func BenchmarkDigestBytes(b *testing.B) { for _, bb := range benchmarks { in := make([]byte, bb.n) for i := range in { in[i] = byte(i) } b.Run(bb.name, func(b *testing.B) { b.SetBytes(bb.n) for i := 0; i < b.N; i++ { h := New() h.Write(in) _ = h.Sum64() } }) } } func BenchmarkDigestString(b *testing.B) { for _, bb := range benchmarks { s := strings.Repeat("a", int(bb.n)) b.Run(bb.name, func(b *testing.B) { b.SetBytes(bb.n) for i := 0; i < b.N; i++ { h := New() h.WriteString(s) _ = h.Sum64() } }) } } dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.3.0/xxhash.go0000644000000000000000000001303415024302470023554 0ustar rootroot// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described // at http://cyan4973.github.io/xxHash/. 
package xxhash import ( "encoding/binary" "errors" "math/bits" ) const ( prime1 uint64 = 11400714785074694791 prime2 uint64 = 14029467366897019727 prime3 uint64 = 1609587929392839161 prime4 uint64 = 9650029242287828579 prime5 uint64 = 2870177450012600261 ) // Store the primes in an array as well. // // The consts are used when possible in Go code to avoid MOVs but we need a // contiguous array for the assembly code. var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. // // Note that a zero-valued Digest is not ready to receive writes. // Call Reset or create a Digest using New before calling other methods. type Digest struct { v1 uint64 v2 uint64 v3 uint64 v4 uint64 total uint64 mem [32]byte n int // how much of mem is used } // New creates a new Digest with a zero seed. func New() *Digest { return NewWithSeed(0) } // NewWithSeed creates a new Digest with the given seed. func NewWithSeed(seed uint64) *Digest { var d Digest d.ResetWithSeed(seed) return &d } // Reset clears the Digest's state so that it can be reused. // It uses a seed value of zero. func (d *Digest) Reset() { d.ResetWithSeed(0) } // ResetWithSeed clears the Digest's state so that it can be reused. // It uses the given seed to initialize the state. func (d *Digest) ResetWithSeed(seed uint64) { d.v1 = seed + prime1 + prime2 d.v2 = seed + prime2 d.v3 = seed d.v4 = seed - prime1 d.total = 0 d.n = 0 } // Size always returns 8 bytes. func (d *Digest) Size() int { return 8 } // BlockSize always returns 32 bytes. func (d *Digest) BlockSize() int { return 32 } // Write adds more data to d. It always returns len(b), nil. func (d *Digest) Write(b []byte) (n int, err error) { n = len(b) d.total += uint64(n) memleft := d.mem[d.n&(len(d.mem)-1):] if d.n+n < 32 { // This new data doesn't even fill the current block. copy(memleft, b) d.n += n return } if d.n > 0 { // Finish off the partial block. c := copy(memleft, b) d.v1 = round(d.v1, u64(d.mem[0:8])) d.v2 = round(d.v2, u64(d.mem[8:16])) d.v3 = round(d.v3, u64(d.mem[16:24])) d.v4 = round(d.v4, u64(d.mem[24:32])) b = b[c:] d.n = 0 } if len(b) >= 32 { // One or more full blocks left. nw := writeBlocks(d, b) b = b[nw:] } // Store any remaining partial block. copy(d.mem[:], b) d.n = len(b) return } // Sum appends the current hash to b and returns the resulting slice. func (d *Digest) Sum(b []byte) []byte { s := d.Sum64() return append( b, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s), ) } // Sum64 returns the current hash. func (d *Digest) Sum64() uint64 { var h uint64 if d.total >= 32 { v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) h = mergeRound(h, v1) h = mergeRound(h, v2) h = mergeRound(h, v3) h = mergeRound(h, v4) } else { h = d.v3 + prime5 } h += d.total b := d.mem[:d.n&(len(d.mem)-1)] for ; len(b) >= 8; b = b[8:] { k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } if len(b) >= 4 { h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 b = b[4:] } for ; len(b) > 0; b = b[1:] { h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 } h ^= h >> 33 h *= prime2 h ^= h >> 29 h *= prime3 h ^= h >> 32 return h } const ( magic = "xxh\x06" marshaledSize = len(magic) + 8*5 + 32 ) // MarshalBinary implements the encoding.BinaryMarshaler interface. func (d *Digest) MarshalBinary() ([]byte, error) { b := make([]byte, 0, marshaledSize) b = append(b, magic...) 
b = appendUint64(b, d.v1) b = appendUint64(b, d.v2) b = appendUint64(b, d.v3) b = appendUint64(b, d.v4) b = appendUint64(b, d.total) b = append(b, d.mem[:d.n]...) b = b[:len(b)+len(d.mem)-d.n] return b, nil } // UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. func (d *Digest) UnmarshalBinary(b []byte) error { if len(b) < len(magic) || string(b[:len(magic)]) != magic { return errors.New("xxhash: invalid hash state identifier") } if len(b) != marshaledSize { return errors.New("xxhash: invalid hash state size") } b = b[len(magic):] b, d.v1 = consumeUint64(b) b, d.v2 = consumeUint64(b) b, d.v3 = consumeUint64(b) b, d.v4 = consumeUint64(b) b, d.total = consumeUint64(b) copy(d.mem[:], b) d.n = int(d.total % uint64(len(d.mem))) return nil } func appendUint64(b []byte, x uint64) []byte { var a [8]byte binary.LittleEndian.PutUint64(a[:], x) return append(b, a[:]...) } func consumeUint64(b []byte) ([]byte, uint64) { x := u64(b) return b[8:], x } func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } func round(acc, input uint64) uint64 { acc += input * prime2 acc = rol31(acc) acc *= prime1 return acc } func mergeRound(acc, val uint64) uint64 { val = round(0, val) acc ^= val acc = acc*prime1 + prime4 return acc } func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.3.0/xxhash_other.go0000644000000000000000000000312315024302470024753 0ustar rootroot//go:build (!amd64 && !arm64) || appengine || !gc || purego // +build !amd64,!arm64 appengine !gc purego package xxhash // Sum64 computes the 64-bit xxHash digest of b with a zero seed. func Sum64(b []byte) uint64 { // A simpler version would be // d := New() // d.Write(b) // return d.Sum64() // but this is faster, particularly for small inputs. 
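	// Outline of the fallback below: when the input has at least 32 bytes,
	// consume it in 32-byte blocks using four parallel accumulators (v1-v4)
	// and merge them; then fold in the remaining 8-, 4-, and 1-byte tails;
	// finally apply the avalanche steps (shift-xor and prime multiplications).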
n := len(b) var h uint64 if n >= 32 { v1 := primes[0] + prime2 v2 := prime2 v3 := uint64(0) v4 := -primes[0] for len(b) >= 32 { v1 = round(v1, u64(b[0:8:len(b)])) v2 = round(v2, u64(b[8:16:len(b)])) v3 = round(v3, u64(b[16:24:len(b)])) v4 = round(v4, u64(b[24:32:len(b)])) b = b[32:len(b):len(b)] } h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) h = mergeRound(h, v1) h = mergeRound(h, v2) h = mergeRound(h, v3) h = mergeRound(h, v4) } else { h = prime5 } h += uint64(n) for ; len(b) >= 8; b = b[8:] { k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } if len(b) >= 4 { h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 b = b[4:] } for ; len(b) > 0; b = b[1:] { h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 } h ^= h >> 33 h *= prime2 h ^= h >> 29 h *= prime3 h ^= h >> 32 return h } func writeBlocks(d *Digest, b []byte) int { v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 n := len(b) for len(b) >= 32 { v1 = round(v1, u64(b[0:8:len(b)])) v2 = round(v2, u64(b[8:16:len(b)])) v3 = round(v3, u64(b[16:24:len(b)])) v4 = round(v4, u64(b[24:32:len(b)])) b = b[32:len(b):len(b)] } d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 return n - len(b) } dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.3.0/xxhsum/0000755000000000000000000000000015024302470023255 5ustar rootrootdependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.3.0/xxhsum/xxhsum.go0000644000000000000000000000154515024302470025145 0ustar rootrootpackage main import ( "fmt" "io" "os" "github.com/cespare/xxhash/v2" ) func main() { if contains(os.Args[1:], "-h") { fmt.Fprintf(os.Stderr, `Usage: %s [filenames] If no filenames are provided or only - is given, input is read from stdin. `, os.Args[0]) os.Exit(1) } if len(os.Args) < 2 || len(os.Args) == 2 && os.Args[1] == "-" { printHash(os.Stdin, "-") return } for _, path := range os.Args[1:] { f, err := os.Open(path) if err != nil { fmt.Fprintln(os.Stderr, err) continue } printHash(f, path) f.Close() } } func contains(ss []string, s string) bool { for _, s1 := range ss { if s1 == s { return true } } return false } func printHash(r io.Reader, name string) { h := xxhash.New() if _, err := io.Copy(h, r); err != nil { fmt.Fprintln(os.Stderr, err) return } fmt.Printf("%016x %s\n", h.Sum64(), name) } dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.3.0/xxhsum/.gitignore0000644000000000000000000000001015024302470025234 0ustar rootroot/xxhsum dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.3.0/go.sum0000644000000000000000000000000015024302470023042 0ustar rootrootdependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.3.0/xxhash_arm64.s0000644000000000000000000000643015024302470024424 0ustar rootroot//go:build !appengine && gc && !purego // +build !appengine // +build gc // +build !purego #include "textflag.h" // Registers: #define digest R1 #define h R2 // return value #define p R3 // input pointer #define n R4 // input length #define nblocks R5 // n / 32 #define prime1 R7 #define prime2 R8 #define prime3 R9 #define prime4 R10 #define prime5 R11 #define v1 R12 #define v2 R13 #define v3 R14 #define v4 R15 #define x1 R20 #define x2 R21 #define x3 R22 #define x4 R23 #define round(acc, x) \ MADD prime2, acc, x, acc \ ROR $64-31, acc \ MUL prime1, acc // round0 performs the operation x = round(0, x). #define round0(x) \ MUL prime2, x \ ROR $64-31, x \ MUL prime1, x #define mergeRound(acc, x) \ round0(x) \ EOR x, acc \ MADD acc, prime4, prime1, acc // blockLoop processes as many 32-byte blocks as possible, // updating v1, v2, v3, and v4. It assumes that n >= 32. 
#define blockLoop() \ LSR $5, n, nblocks \ PCALIGN $16 \ loop: \ LDP.P 16(p), (x1, x2) \ LDP.P 16(p), (x3, x4) \ round(v1, x1) \ round(v2, x2) \ round(v3, x3) \ round(v4, x4) \ SUB $1, nblocks \ CBNZ nblocks, loop // func Sum64(b []byte) uint64 TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 LDP b_base+0(FP), (p, n) LDP ·primes+0(SB), (prime1, prime2) LDP ·primes+16(SB), (prime3, prime4) MOVD ·primes+32(SB), prime5 CMP $32, n CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } BLT afterLoop ADD prime1, prime2, v1 MOVD prime2, v2 MOVD $0, v3 NEG prime1, v4 blockLoop() ROR $64-1, v1, x1 ROR $64-7, v2, x2 ADD x1, x2 ROR $64-12, v3, x3 ROR $64-18, v4, x4 ADD x3, x4 ADD x2, x4, h mergeRound(h, v1) mergeRound(h, v2) mergeRound(h, v3) mergeRound(h, v4) afterLoop: ADD n, h TBZ $4, n, try8 LDP.P 16(p), (x1, x2) round0(x1) // NOTE: here and below, sequencing the EOR after the ROR (using a // rotated register) is worth a small but measurable speedup for small // inputs. ROR $64-27, h EOR x1 @> 64-27, h, h MADD h, prime4, prime1, h round0(x2) ROR $64-27, h EOR x2 @> 64-27, h, h MADD h, prime4, prime1, h try8: TBZ $3, n, try4 MOVD.P 8(p), x1 round0(x1) ROR $64-27, h EOR x1 @> 64-27, h, h MADD h, prime4, prime1, h try4: TBZ $2, n, try2 MOVWU.P 4(p), x2 MUL prime1, x2 ROR $64-23, h EOR x2 @> 64-23, h, h MADD h, prime3, prime2, h try2: TBZ $1, n, try1 MOVHU.P 2(p), x3 AND $255, x3, x1 LSR $8, x3, x2 MUL prime5, x1 ROR $64-11, h EOR x1 @> 64-11, h, h MUL prime1, h MUL prime5, x2 ROR $64-11, h EOR x2 @> 64-11, h, h MUL prime1, h try1: TBZ $0, n, finalize MOVBU (p), x4 MUL prime5, x4 ROR $64-11, h EOR x4 @> 64-11, h, h MUL prime1, h finalize: EOR h >> 33, h MUL prime2, h EOR h >> 29, h MUL prime3, h EOR h >> 32, h MOVD h, ret+24(FP) RET // func writeBlocks(d *Digest, b []byte) int TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 LDP ·primes+0(SB), (prime1, prime2) // Load state. Assume v[1-4] are stored contiguously. MOVD d+0(FP), digest LDP 0(digest), (v1, v2) LDP 16(digest), (v3, v4) LDP b_base+8(FP), (p, n) blockLoop() // Store updated state. STP (v1, v2), 0(digest) STP (v3, v4), 16(digest) BIC $31, n MOVD n, ret+32(FP) RET dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.3.0/testall.sh0000644000000000000000000000043215024302470023724 0ustar rootroot#!/bin/bash set -eu -o pipefail # Small convenience script for running the tests with various combinations of # arch/tags. This assumes we're running on amd64 and have qemu available. go test ./... go test -tags purego ./... GOARCH=arm64 go test GOARCH=arm64 go test -tags purego dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.3.0/.github/0000755000000000000000000000000015024302470023261 5ustar rootrootdependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.3.0/.github/workflows/0000755000000000000000000000000015024302470025316 5ustar rootrootdependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.3.0/.github/workflows/test.yml0000644000000000000000000000243115024302470027020 0ustar rootrootname: Test on: push: branches: [main] pull_request: jobs: test: strategy: matrix: go-version: [1.18.x, 1.19.x] os: [ubuntu-latest, macos-latest, windows-latest] runs-on: ${{ matrix.os }} steps: - name: Install go uses: WillAbides/setup-go-faster@v1.5.0 with: go-version: ${{ matrix.go-version }} - name: Check out code uses: actions/checkout@v2 - name: Test run: go test -count 1 -bench . -benchtime 1x ./... - name: Test with -tags purego run: go test -count 1 -bench . -benchtime 1x -tags purego ./... 
test-qemu: needs: test strategy: matrix: go-version: [1.18.x, 1.19.x] arch: [386, arm, arm64] runs-on: ubuntu-latest steps: - name: Install go uses: WillAbides/setup-go-faster@v1.5.0 with: go-version: ${{ matrix.go-version }} - name: Install QEMU uses: docker/setup-qemu-action@v1 - name: Check out code uses: actions/checkout@v2 - name: Run test via qemu/binfmt # TODO: Run the dynamic linking tests as well. That is a little more # involved. run: go test -v -count 1 -bench . -benchtime 1x env: GOARCH: ${{ matrix.arch }} dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.3.0/LICENSE.txt0000644000000000000000000000205415024302470023545 0ustar rootrootCopyright (c) 2016 Caleb Spare MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.3.0/dynamic/0000755000000000000000000000000015024302470023345 5ustar rootrootdependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.3.0/dynamic/plugin.go0000644000000000000000000000175515024302470025202 0ustar rootroot//go:build ignore // +build ignore package main import ( "fmt" "log" "testing" "github.com/cespare/xxhash/v2" ) const ( in = "Call me Ishmael. Some years ago--never mind how long precisely-" want = uint64(0x02a2e85470d6fd96) ) func TestSum(t *testing.T) { got := xxhash.Sum64String(in) if got != want { t.Fatalf("Sum64String: got 0x%x; want 0x%x", got, want) } } func TestDigest(t *testing.T) { for chunkSize := 1; chunkSize <= len(in); chunkSize++ { name := fmt.Sprintf("[chunkSize=%d]", chunkSize) t.Run(name, func(t *testing.T) { d := xxhash.New() for i := 0; i < len(in); i += chunkSize { chunk := in[i:] if len(chunk) > chunkSize { chunk = chunk[:chunkSize] } n, err := d.WriteString(chunk) if err != nil || n != len(chunk) { t.Fatalf("Digest.WriteString: got (%d, %v); want (%d, nil)", n, err, len(chunk)) } } if got := d.Sum64(); got != want { log.Fatalf("Digest.Sum64: got 0x%x; want 0x%x", got, want) } }) } } dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.3.0/dynamic/dynamic_test.go0000644000000000000000000000151615024302470026362 0ustar rootroot//go:build linux || darwin // +build linux darwin package main import ( "bytes" "log" "os" "os/exec" "plugin" "testing" ) // This is a cursory test that checks whether things work under dynamic linking. 
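// The flow: TestMain first shells out to
// `go build -buildmode=plugin -o plugin.so plugin.go`, and TestDynamic then
// opens the resulting plugin and invokes its exported TestSum and TestDigest
// functions via plugin.Lookup.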
func TestMain(m *testing.M) { cmd := exec.Command( "go", "build", "-buildmode", "plugin", "-o", "plugin.so", "plugin.go", ) var out bytes.Buffer cmd.Stdout = &out cmd.Stderr = &out if err := cmd.Run(); err != nil { log.Fatalf("Error building plugin: %s\nOutput:\n%s", err, out.String()) } os.Exit(m.Run()) } func TestDynamic(t *testing.T) { plug, err := plugin.Open("plugin.so") if err != nil { t.Fatal(err) } for _, test := range []string{ "TestSum", "TestDigest", } { f, err := plug.Lookup(test) if err != nil { t.Fatalf("cannot find func %s: %s", test, err) } f.(func(*testing.T))(t) } } dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.3.0/dynamic/.gitignore0000644000000000000000000000001315024302470025327 0ustar rootroot/plugin.so dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.3.0/xxhash_safe.go0000644000000000000000000000065615024302470024560 0ustar rootroot//go:build appengine // +build appengine // This file contains the safe implementations of otherwise unsafe-using code. package xxhash // Sum64String computes the 64-bit xxHash digest of s with a zero seed. func Sum64String(s string) uint64 { return Sum64([]byte(s)) } // WriteString adds more data to d. It always returns len(s), nil. func (d *Digest) WriteString(s string) (n int, err error) { return d.Write([]byte(s)) } dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.3.0/xxhash_test.go0000644000000000000000000001154415024302470024617 0ustar rootrootpackage xxhash import ( "bytes" "encoding/binary" "fmt" "math" "strings" "testing" ) func TestAll(t *testing.T) { // Exactly 63 characters, which exercises all code paths. const s63 = "Call me Ishmael. Some years ago--never mind how long precisely-" for _, tt := range []struct { input string seed uint64 want uint64 }{ {"", 0, 0xef46db3751d8e999}, {"a", 0, 0xd24ec4f1a98c6e5b}, {"as", 0, 0x1c330fb2d66be179}, {"asd", 0, 0x631c37ce72a97393}, {"asdf", 0, 0x415872f599cea71e}, {s63, 0, 0x02a2e85470d6fd96}, {"", 123, 0xe0db84de91f3e198}, {"asdf", math.MaxUint64, 0x9a2fd8473be539b6}, {s63, 54321, 0x1736d186daf5d1cd}, } { lastChunkSize := len(tt.input) if lastChunkSize == 0 { lastChunkSize = 1 } var name string if tt.input == "" { name = "input=empty" } else if len(tt.input) > 10 { name = fmt.Sprintf("input=len-%d", len(tt.input)) } else { name = fmt.Sprintf("input=%q", tt.input) } if tt.seed != 0 { name += fmt.Sprintf(",seed=%d", tt.seed) } for chunkSize := 1; chunkSize <= lastChunkSize; chunkSize++ { name := fmt.Sprintf("%s,chunkSize=%d", name, chunkSize) t.Run(name, func(t *testing.T) { testDigest(t, tt.input, tt.seed, chunkSize, tt.want) }) } if tt.seed == 0 { t.Run(name, func(t *testing.T) { testSum(t, tt.input, tt.want) }) } } } func testDigest(t *testing.T, input string, seed uint64, chunkSize int, want uint64) { d := NewWithSeed(seed) ds := NewWithSeed(seed) // uses WriteString for i := 0; i < len(input); i += chunkSize { chunk := input[i:] if len(chunk) > chunkSize { chunk = chunk[:chunkSize] } n, err := d.Write([]byte(chunk)) if err != nil || n != len(chunk) { t.Fatalf("Digest.Write: got (%d, %v); want (%d, nil)", n, err, len(chunk)) } n, err = ds.WriteString(chunk) if err != nil || n != len(chunk) { t.Fatalf("Digest.WriteString: got (%d, %v); want (%d, nil)", n, err, len(chunk)) } } if got := d.Sum64(); got != want { t.Fatalf("Digest.Sum64: got 0x%x; want 0x%x", got, want) } if got := ds.Sum64(); got != want { t.Fatalf("Digest.Sum64 (WriteString): got 0x%x; want 0x%x", got, want) } var b [8]byte binary.BigEndian.PutUint64(b[:], want) if got := d.Sum(nil); !bytes.Equal(got, b[:]) 
{ t.Fatalf("Sum: got %v; want %v", got, b[:]) } } func testSum(t *testing.T, input string, want uint64) { if got := Sum64([]byte(input)); got != want { t.Fatalf("Sum64: got 0x%x; want 0x%x", got, want) } if got := Sum64String(input); got != want { t.Fatalf("Sum64String: got 0x%x; want 0x%x", got, want) } } func TestReset(t *testing.T) { parts := []string{"The quic", "k br", "o", "wn fox jumps", " ov", "er the lazy ", "dog."} d := New() for _, part := range parts { d.Write([]byte(part)) } h0 := d.Sum64() d.Reset() d.Write([]byte(strings.Join(parts, ""))) h1 := d.Sum64() if h0 != h1 { t.Errorf("0x%x != 0x%x", h0, h1) } } func TestResetWithSeed(t *testing.T) { parts := []string{"The quic", "k br", "o", "wn fox jumps", " ov", "er the lazy ", "dog."} d := NewWithSeed(123) for _, part := range parts { d.Write([]byte(part)) } h0 := d.Sum64() d.ResetWithSeed(123) d.Write([]byte(strings.Join(parts, ""))) h1 := d.Sum64() if h0 != h1 { t.Errorf("0x%x != 0x%x", h0, h1) } } func TestBinaryMarshaling(t *testing.T) { d := New() d.WriteString("abc") b, err := d.MarshalBinary() if err != nil { t.Fatal(err) } d = New() d.WriteString("junk") if err := d.UnmarshalBinary(b); err != nil { t.Fatal(err) } d.WriteString("def") if got, want := d.Sum64(), Sum64String("abcdef"); got != want { t.Fatalf("after MarshalBinary+UnmarshalBinary, got 0x%x; want 0x%x", got, want) } d0 := New() d1 := New() for i := 0; i < 64; i++ { b, err := d0.MarshalBinary() if err != nil { t.Fatal(err) } d0 = new(Digest) if err := d0.UnmarshalBinary(b); err != nil { t.Fatal(err) } if got, want := d0.Sum64(), d1.Sum64(); got != want { t.Fatalf("after %d Writes, unmarshaled Digest gave sum 0x%x; want 0x%x", i, got, want) } d0.Write([]byte{'a'}) d1.Write([]byte{'a'}) } } var sink uint64 func TestAllocs(t *testing.T) { const shortStr = "abcdefghijklmnop" // Sum64([]byte(shortString)) shouldn't allocate because the // intermediate []byte ought not to escape. // (See https://github.com/cespare/xxhash/pull/2.) t.Run("Sum64", func(t *testing.T) { testAllocs(t, func() { sink = Sum64([]byte(shortStr)) }) }) // Creating and using a Digest shouldn't allocate because its methods // shouldn't make it escape. (A previous version of New returned a // hash.Hash64 which forces an allocation.) t.Run("Digest", func(t *testing.T) { b := []byte("asdf") testAllocs(t, func() { d := New() d.Write(b) sink = d.Sum64() }) }) } func testAllocs(t *testing.T, fn func()) { t.Helper() if allocs := int(testing.AllocsPerRun(10, fn)); allocs > 0 { t.Fatalf("got %d allocation(s) (want zero)", allocs) } } dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.3.0/xxhash_unsafe_test.go0000644000000000000000000000233015024302470026151 0ustar rootroot//go:build !appengine // +build !appengine package xxhash import ( "os/exec" "sort" "strings" "testing" ) func TestStringAllocs(t *testing.T) { longStr := strings.Repeat("a", 1000) t.Run("Sum64String", func(t *testing.T) { testAllocs(t, func() { sink = Sum64String(longStr) }) }) t.Run("Digest.WriteString", func(t *testing.T) { testAllocs(t, func() { d := New() d.WriteString(longStr) sink = d.Sum64() }) }) } // This test is inspired by the Go runtime tests in https://go.dev/cl/57410. // It asserts that certain important functions may be inlined. 
func TestInlining(t *testing.T) { funcs := map[string]struct{}{ "Sum64String": {}, "(*Digest).WriteString": {}, } cmd := exec.Command("go", "test", "-gcflags=-m", "-run", "xxxx") out, err := cmd.CombinedOutput() if err != nil { t.Log(string(out)) t.Fatal(err) } for _, line := range strings.Split(string(out), "\n") { parts := strings.Split(line, ": can inline") if len(parts) < 2 { continue } delete(funcs, strings.TrimSpace(parts[1])) } var failed []string for fn := range funcs { failed = append(failed, fn) } sort.Strings(failed) for _, fn := range failed { t.Errorf("function %s not inlined", fn) } } dependencies/pkg/mod/github.com/cespare/xxhash/v2@v2.3.0/xxhash_asm.go0000644000000000000000000000047615024302470024422 0ustar rootroot//go:build (amd64 || arm64) && !appengine && gc && !purego // +build amd64 arm64 // +build !appengine // +build gc // +build !purego package xxhash // Sum64 computes the 64-bit xxHash digest of b with a zero seed. // //go:noescape func Sum64(b []byte) uint64 //go:noescape func writeBlocks(d *Digest, b []byte) int dependencies/pkg/mod/github.com/rivo/0000775000000000000000000000000015024302472016561 5ustar rootrootdependencies/pkg/mod/github.com/rivo/uniseg@v0.2.0/0000755000000000000000000000000015024302472020715 5ustar rootrootdependencies/pkg/mod/github.com/rivo/uniseg@v0.2.0/grapheme_test.go0000644000000000000000000050066315024302472024105 0ustar rootrootpackage uniseg import ( "fmt" "testing" ) // Type example. func ExampleGraphemes() { gr := NewGraphemes("👍🏼!") for gr.Next() { fmt.Printf("%x ", gr.Runes()) } // Output: [1f44d 1f3fc] [21] } // The test cases for the simple test function. var testCases = []struct { original string expected [][]rune }{ {original: "", expected: [][]rune{}}, {original: "x", expected: [][]rune{{0x78}}}, {original: "basic", expected: [][]rune{{0x62}, {0x61}, {0x73}, {0x69}, {0x63}}}, {original: "möp", expected: [][]rune{{0x6d}, {0x6f, 0x308}, {0x70}}}, {original: "\r\n", expected: [][]rune{{0xd, 0xa}}}, {original: "\n\n", expected: [][]rune{{0xa}, {0xa}}}, {original: "\t*", expected: [][]rune{{0x9}, {0x2a}}}, {original: "뢴", expected: [][]rune{{0x1105, 0x116c, 0x11ab}}}, {original: "ܐ܏ܒܓܕ", expected: [][]rune{{0x710}, {0x70f, 0x712}, {0x713}, {0x715}}}, {original: "ำ", expected: [][]rune{{0xe33}}}, {original: "ำำ", expected: [][]rune{{0xe33, 0xe33}}}, {original: "สระอำ", expected: [][]rune{{0xe2a}, {0xe23}, {0xe30}, {0xe2d, 0xe33}}}, {original: "*뢴*", expected: [][]rune{{0x2a}, {0x1105, 0x116c, 0x11ab}, {0x2a}}}, {original: "*👩‍❤️‍💋‍👩*", expected: [][]rune{{0x2a}, {0x1f469, 0x200d, 0x2764, 0xfe0f, 0x200d, 0x1f48b, 0x200d, 0x1f469}, {0x2a}}}, {original: "👩‍❤️‍💋‍👩", expected: [][]rune{{0x1f469, 0x200d, 0x2764, 0xfe0f, 0x200d, 0x1f48b, 0x200d, 0x1f469}}}, {original: "🏋🏽‍♀️", expected: [][]rune{{0x1f3cb, 0x1f3fd, 0x200d, 0x2640, 0xfe0f}}}, {original: "🙂", expected: [][]rune{{0x1f642}}}, {original: "🙂🙂", expected: [][]rune{{0x1f642}, {0x1f642}}}, {original: "🇩🇪", expected: [][]rune{{0x1f1e9, 0x1f1ea}}}, {original: "🏳️‍🌈", expected: [][]rune{{0x1f3f3, 0xfe0f, 0x200d, 0x1f308}}}, // The following tests are taken from // http://www.unicode.org/Public/12.0.0/ucd/auxiliary/GraphemeBreakTest.txt, // see https://www.unicode.org/license.html for the Unicode license agreement. 
{original: "\u0020\u0020", expected: [][]rune{{0x0020}, {0x0020}}}, // ÷ [0.2] SPACE (Other) ÷ [999.0] SPACE (Other) ÷ [0.3] {original: "\u0020\u0308\u0020", expected: [][]rune{{0x0020, 0x0308}, {0x0020}}}, // ÷ [0.2] SPACE (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] SPACE (Other) ÷ [0.3] {original: "\u0020\u000D", expected: [][]rune{{0x0020}, {0x000D}}}, // ÷ [0.2] SPACE (Other) ÷ [5.0] (CR) ÷ [0.3] {original: "\u0020\u0308\u000D", expected: [][]rune{{0x0020, 0x0308}, {0x000D}}}, // ÷ [0.2] SPACE (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (CR) ÷ [0.3] {original: "\u0020\u000A", expected: [][]rune{{0x0020}, {0x000A}}}, // ÷ [0.2] SPACE (Other) ÷ [5.0] (LF) ÷ [0.3] {original: "\u0020\u0308\u000A", expected: [][]rune{{0x0020, 0x0308}, {0x000A}}}, // ÷ [0.2] SPACE (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (LF) ÷ [0.3] {original: "\u0020\u0001", expected: [][]rune{{0x0020}, {0x0001}}}, // ÷ [0.2] SPACE (Other) ÷ [5.0] (Control) ÷ [0.3] {original: "\u0020\u0308\u0001", expected: [][]rune{{0x0020, 0x0308}, {0x0001}}}, // ÷ [0.2] SPACE (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (Control) ÷ [0.3] {original: "\u0020\u034F", expected: [][]rune{{0x0020, 0x034F}}}, // ÷ [0.2] SPACE (Other) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\u0020\u0308\u034F", expected: [][]rune{{0x0020, 0x0308, 0x034F}}}, // ÷ [0.2] SPACE (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\u0020\U0001F1E6", expected: [][]rune{{0x0020}, {0x1F1E6}}}, // ÷ [0.2] SPACE (Other) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\u0020\u0308\U0001F1E6", expected: [][]rune{{0x0020, 0x0308}, {0x1F1E6}}}, // ÷ [0.2] SPACE (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\u0020\u0600", expected: [][]rune{{0x0020}, {0x0600}}}, // ÷ [0.2] SPACE (Other) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\u0020\u0308\u0600", expected: [][]rune{{0x0020, 0x0308}, {0x0600}}}, // ÷ [0.2] SPACE (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\u0020\u0903", expected: [][]rune{{0x0020, 0x0903}}}, // ÷ [0.2] SPACE (Other) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\u0020\u0308\u0903", expected: [][]rune{{0x0020, 0x0308, 0x0903}}}, // ÷ [0.2] SPACE (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\u0020\u1100", expected: [][]rune{{0x0020}, {0x1100}}}, // ÷ [0.2] SPACE (Other) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\u0020\u0308\u1100", expected: [][]rune{{0x0020, 0x0308}, {0x1100}}}, // ÷ [0.2] SPACE (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\u0020\u1160", expected: [][]rune{{0x0020}, {0x1160}}}, // ÷ [0.2] SPACE (Other) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\u0020\u0308\u1160", expected: [][]rune{{0x0020, 0x0308}, {0x1160}}}, // ÷ [0.2] SPACE (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\u0020\u11A8", expected: [][]rune{{0x0020}, {0x11A8}}}, // ÷ [0.2] SPACE (Other) ÷ [999.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\u0020\u0308\u11A8", expected: [][]rune{{0x0020, 0x0308}, {0x11A8}}}, // ÷ [0.2] SPACE (Other) × [9.0] COMBINING DIAERESIS 
(Extend_ExtCccZwj) ÷ [999.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\u0020\uAC00", expected: [][]rune{{0x0020}, {0xAC00}}}, // ÷ [0.2] SPACE (Other) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\u0020\u0308\uAC00", expected: [][]rune{{0x0020, 0x0308}, {0xAC00}}}, // ÷ [0.2] SPACE (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\u0020\uAC01", expected: [][]rune{{0x0020}, {0xAC01}}}, // ÷ [0.2] SPACE (Other) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\u0020\u0308\uAC01", expected: [][]rune{{0x0020, 0x0308}, {0xAC01}}}, // ÷ [0.2] SPACE (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\u0020\u231A", expected: [][]rune{{0x0020}, {0x231A}}}, // ÷ [0.2] SPACE (Other) ÷ [999.0] WATCH (ExtPict) ÷ [0.3] {original: "\u0020\u0308\u231A", expected: [][]rune{{0x0020, 0x0308}, {0x231A}}}, // ÷ [0.2] SPACE (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] WATCH (ExtPict) ÷ [0.3] {original: "\u0020\u0300", expected: [][]rune{{0x0020, 0x0300}}}, // ÷ [0.2] SPACE (Other) × [9.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\u0020\u0308\u0300", expected: [][]rune{{0x0020, 0x0308, 0x0300}}}, // ÷ [0.2] SPACE (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\u0020\u200D", expected: [][]rune{{0x0020, 0x200D}}}, // ÷ [0.2] SPACE (Other) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\u0020\u0308\u200D", expected: [][]rune{{0x0020, 0x0308, 0x200D}}}, // ÷ [0.2] SPACE (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\u0020\u0378", expected: [][]rune{{0x0020}, {0x0378}}}, // ÷ [0.2] SPACE (Other) ÷ [999.0] (Other) ÷ [0.3] {original: "\u0020\u0308\u0378", expected: [][]rune{{0x0020, 0x0308}, {0x0378}}}, // ÷ [0.2] SPACE (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] (Other) ÷ [0.3] {original: "\u000D\u0020", expected: [][]rune{{0x000D}, {0x0020}}}, // ÷ [0.2] (CR) ÷ [4.0] SPACE (Other) ÷ [0.3] {original: "\u000D\u0308\u0020", expected: [][]rune{{0x000D}, {0x0308}, {0x0020}}}, // ÷ [0.2] (CR) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] SPACE (Other) ÷ [0.3] {original: "\u000D\u000D", expected: [][]rune{{0x000D}, {0x000D}}}, // ÷ [0.2] (CR) ÷ [4.0] (CR) ÷ [0.3] {original: "\u000D\u0308\u000D", expected: [][]rune{{0x000D}, {0x0308}, {0x000D}}}, // ÷ [0.2] (CR) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (CR) ÷ [0.3] {original: "\u000D\u000A", expected: [][]rune{{0x000D, 0x000A}}}, // ÷ [0.2] (CR) × [3.0] (LF) ÷ [0.3] {original: "\u000D\u0308\u000A", expected: [][]rune{{0x000D}, {0x0308}, {0x000A}}}, // ÷ [0.2] (CR) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (LF) ÷ [0.3] {original: "\u000D\u0001", expected: [][]rune{{0x000D}, {0x0001}}}, // ÷ [0.2] (CR) ÷ [4.0] (Control) ÷ [0.3] {original: "\u000D\u0308\u0001", expected: [][]rune{{0x000D}, {0x0308}, {0x0001}}}, // ÷ [0.2] (CR) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (Control) ÷ [0.3] {original: "\u000D\u034F", expected: [][]rune{{0x000D}, {0x034F}}}, // ÷ [0.2] (CR) ÷ [4.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\u000D\u0308\u034F", expected: [][]rune{{0x000D}, {0x0308, 0x034F}}}, // ÷ [0.2] (CR) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\u000D\U0001F1E6", expected: 
[][]rune{{0x000D}, {0x1F1E6}}}, // ÷ [0.2] (CR) ÷ [4.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\u000D\u0308\U0001F1E6", expected: [][]rune{{0x000D}, {0x0308}, {0x1F1E6}}}, // ÷ [0.2] (CR) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\u000D\u0600", expected: [][]rune{{0x000D}, {0x0600}}}, // ÷ [0.2] (CR) ÷ [4.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\u000D\u0308\u0600", expected: [][]rune{{0x000D}, {0x0308}, {0x0600}}}, // ÷ [0.2] (CR) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\u000D\u0903", expected: [][]rune{{0x000D}, {0x0903}}}, // ÷ [0.2] (CR) ÷ [4.0] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\u000D\u0308\u0903", expected: [][]rune{{0x000D}, {0x0308, 0x0903}}}, // ÷ [0.2] (CR) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\u000D\u1100", expected: [][]rune{{0x000D}, {0x1100}}}, // ÷ [0.2] (CR) ÷ [4.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\u000D\u0308\u1100", expected: [][]rune{{0x000D}, {0x0308}, {0x1100}}}, // ÷ [0.2] (CR) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\u000D\u1160", expected: [][]rune{{0x000D}, {0x1160}}}, // ÷ [0.2] (CR) ÷ [4.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\u000D\u0308\u1160", expected: [][]rune{{0x000D}, {0x0308}, {0x1160}}}, // ÷ [0.2] (CR) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\u000D\u11A8", expected: [][]rune{{0x000D}, {0x11A8}}}, // ÷ [0.2] (CR) ÷ [4.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\u000D\u0308\u11A8", expected: [][]rune{{0x000D}, {0x0308}, {0x11A8}}}, // ÷ [0.2] (CR) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\u000D\uAC00", expected: [][]rune{{0x000D}, {0xAC00}}}, // ÷ [0.2] (CR) ÷ [4.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\u000D\u0308\uAC00", expected: [][]rune{{0x000D}, {0x0308}, {0xAC00}}}, // ÷ [0.2] (CR) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\u000D\uAC01", expected: [][]rune{{0x000D}, {0xAC01}}}, // ÷ [0.2] (CR) ÷ [4.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\u000D\u0308\uAC01", expected: [][]rune{{0x000D}, {0x0308}, {0xAC01}}}, // ÷ [0.2] (CR) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\u000D\u231A", expected: [][]rune{{0x000D}, {0x231A}}}, // ÷ [0.2] (CR) ÷ [4.0] WATCH (ExtPict) ÷ [0.3] {original: "\u000D\u0308\u231A", expected: [][]rune{{0x000D}, {0x0308}, {0x231A}}}, // ÷ [0.2] (CR) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] WATCH (ExtPict) ÷ [0.3] {original: "\u000D\u0300", expected: [][]rune{{0x000D}, {0x0300}}}, // ÷ [0.2] (CR) ÷ [4.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\u000D\u0308\u0300", expected: [][]rune{{0x000D}, {0x0308, 0x0300}}}, // ÷ [0.2] (CR) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\u000D\u200D", expected: [][]rune{{0x000D}, {0x200D}}}, // ÷ [0.2] (CR) ÷ [4.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\u000D\u0308\u200D", expected: [][]rune{{0x000D}, {0x0308, 0x200D}}}, // ÷ [0.2] (CR) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: 
"\u000D\u0378", expected: [][]rune{{0x000D}, {0x0378}}}, // ÷ [0.2] (CR) ÷ [4.0] (Other) ÷ [0.3] {original: "\u000D\u0308\u0378", expected: [][]rune{{0x000D}, {0x0308}, {0x0378}}}, // ÷ [0.2] (CR) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] (Other) ÷ [0.3] {original: "\u000A\u0020", expected: [][]rune{{0x000A}, {0x0020}}}, // ÷ [0.2] (LF) ÷ [4.0] SPACE (Other) ÷ [0.3] {original: "\u000A\u0308\u0020", expected: [][]rune{{0x000A}, {0x0308}, {0x0020}}}, // ÷ [0.2] (LF) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] SPACE (Other) ÷ [0.3] {original: "\u000A\u000D", expected: [][]rune{{0x000A}, {0x000D}}}, // ÷ [0.2] (LF) ÷ [4.0] (CR) ÷ [0.3] {original: "\u000A\u0308\u000D", expected: [][]rune{{0x000A}, {0x0308}, {0x000D}}}, // ÷ [0.2] (LF) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (CR) ÷ [0.3] {original: "\u000A\u000A", expected: [][]rune{{0x000A}, {0x000A}}}, // ÷ [0.2] (LF) ÷ [4.0] (LF) ÷ [0.3] {original: "\u000A\u0308\u000A", expected: [][]rune{{0x000A}, {0x0308}, {0x000A}}}, // ÷ [0.2] (LF) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (LF) ÷ [0.3] {original: "\u000A\u0001", expected: [][]rune{{0x000A}, {0x0001}}}, // ÷ [0.2] (LF) ÷ [4.0] (Control) ÷ [0.3] {original: "\u000A\u0308\u0001", expected: [][]rune{{0x000A}, {0x0308}, {0x0001}}}, // ÷ [0.2] (LF) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (Control) ÷ [0.3] {original: "\u000A\u034F", expected: [][]rune{{0x000A}, {0x034F}}}, // ÷ [0.2] (LF) ÷ [4.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\u000A\u0308\u034F", expected: [][]rune{{0x000A}, {0x0308, 0x034F}}}, // ÷ [0.2] (LF) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\u000A\U0001F1E6", expected: [][]rune{{0x000A}, {0x1F1E6}}}, // ÷ [0.2] (LF) ÷ [4.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\u000A\u0308\U0001F1E6", expected: [][]rune{{0x000A}, {0x0308}, {0x1F1E6}}}, // ÷ [0.2] (LF) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\u000A\u0600", expected: [][]rune{{0x000A}, {0x0600}}}, // ÷ [0.2] (LF) ÷ [4.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\u000A\u0308\u0600", expected: [][]rune{{0x000A}, {0x0308}, {0x0600}}}, // ÷ [0.2] (LF) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\u000A\u0903", expected: [][]rune{{0x000A}, {0x0903}}}, // ÷ [0.2] (LF) ÷ [4.0] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\u000A\u0308\u0903", expected: [][]rune{{0x000A}, {0x0308, 0x0903}}}, // ÷ [0.2] (LF) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\u000A\u1100", expected: [][]rune{{0x000A}, {0x1100}}}, // ÷ [0.2] (LF) ÷ [4.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\u000A\u0308\u1100", expected: [][]rune{{0x000A}, {0x0308}, {0x1100}}}, // ÷ [0.2] (LF) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\u000A\u1160", expected: [][]rune{{0x000A}, {0x1160}}}, // ÷ [0.2] (LF) ÷ [4.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\u000A\u0308\u1160", expected: [][]rune{{0x000A}, {0x0308}, {0x1160}}}, // ÷ [0.2] (LF) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\u000A\u11A8", expected: [][]rune{{0x000A}, {0x11A8}}}, // ÷ [0.2] (LF) ÷ [4.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\u000A\u0308\u11A8", expected: 
[][]rune{{0x000A}, {0x0308}, {0x11A8}}}, // ÷ [0.2] (LF) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\u000A\uAC00", expected: [][]rune{{0x000A}, {0xAC00}}}, // ÷ [0.2] (LF) ÷ [4.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\u000A\u0308\uAC00", expected: [][]rune{{0x000A}, {0x0308}, {0xAC00}}}, // ÷ [0.2] (LF) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\u000A\uAC01", expected: [][]rune{{0x000A}, {0xAC01}}}, // ÷ [0.2] (LF) ÷ [4.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\u000A\u0308\uAC01", expected: [][]rune{{0x000A}, {0x0308}, {0xAC01}}}, // ÷ [0.2] (LF) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\u000A\u231A", expected: [][]rune{{0x000A}, {0x231A}}}, // ÷ [0.2] (LF) ÷ [4.0] WATCH (ExtPict) ÷ [0.3] {original: "\u000A\u0308\u231A", expected: [][]rune{{0x000A}, {0x0308}, {0x231A}}}, // ÷ [0.2] (LF) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] WATCH (ExtPict) ÷ [0.3] {original: "\u000A\u0300", expected: [][]rune{{0x000A}, {0x0300}}}, // ÷ [0.2] (LF) ÷ [4.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\u000A\u0308\u0300", expected: [][]rune{{0x000A}, {0x0308, 0x0300}}}, // ÷ [0.2] (LF) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\u000A\u200D", expected: [][]rune{{0x000A}, {0x200D}}}, // ÷ [0.2] (LF) ÷ [4.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\u000A\u0308\u200D", expected: [][]rune{{0x000A}, {0x0308, 0x200D}}}, // ÷ [0.2] (LF) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\u000A\u0378", expected: [][]rune{{0x000A}, {0x0378}}}, // ÷ [0.2] (LF) ÷ [4.0] (Other) ÷ [0.3] {original: "\u000A\u0308\u0378", expected: [][]rune{{0x000A}, {0x0308}, {0x0378}}}, // ÷ [0.2] (LF) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] (Other) ÷ [0.3] {original: "\u0001\u0020", expected: [][]rune{{0x0001}, {0x0020}}}, // ÷ [0.2] (Control) ÷ [4.0] SPACE (Other) ÷ [0.3] {original: "\u0001\u0308\u0020", expected: [][]rune{{0x0001}, {0x0308}, {0x0020}}}, // ÷ [0.2] (Control) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] SPACE (Other) ÷ [0.3] {original: "\u0001\u000D", expected: [][]rune{{0x0001}, {0x000D}}}, // ÷ [0.2] (Control) ÷ [4.0] (CR) ÷ [0.3] {original: "\u0001\u0308\u000D", expected: [][]rune{{0x0001}, {0x0308}, {0x000D}}}, // ÷ [0.2] (Control) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (CR) ÷ [0.3] {original: "\u0001\u000A", expected: [][]rune{{0x0001}, {0x000A}}}, // ÷ [0.2] (Control) ÷ [4.0] (LF) ÷ [0.3] {original: "\u0001\u0308\u000A", expected: [][]rune{{0x0001}, {0x0308}, {0x000A}}}, // ÷ [0.2] (Control) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (LF) ÷ [0.3] {original: "\u0001\u0001", expected: [][]rune{{0x0001}, {0x0001}}}, // ÷ [0.2] (Control) ÷ [4.0] (Control) ÷ [0.3] {original: "\u0001\u0308\u0001", expected: [][]rune{{0x0001}, {0x0308}, {0x0001}}}, // ÷ [0.2] (Control) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (Control) ÷ [0.3] {original: "\u0001\u034F", expected: [][]rune{{0x0001}, {0x034F}}}, // ÷ [0.2] (Control) ÷ [4.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\u0001\u0308\u034F", expected: [][]rune{{0x0001}, {0x0308, 0x034F}}}, // ÷ [0.2] (Control) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: 
"\u0001\U0001F1E6", expected: [][]rune{{0x0001}, {0x1F1E6}}}, // ÷ [0.2] (Control) ÷ [4.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\u0001\u0308\U0001F1E6", expected: [][]rune{{0x0001}, {0x0308}, {0x1F1E6}}}, // ÷ [0.2] (Control) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\u0001\u0600", expected: [][]rune{{0x0001}, {0x0600}}}, // ÷ [0.2] (Control) ÷ [4.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\u0001\u0308\u0600", expected: [][]rune{{0x0001}, {0x0308}, {0x0600}}}, // ÷ [0.2] (Control) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\u0001\u0903", expected: [][]rune{{0x0001}, {0x0903}}}, // ÷ [0.2] (Control) ÷ [4.0] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\u0001\u0308\u0903", expected: [][]rune{{0x0001}, {0x0308, 0x0903}}}, // ÷ [0.2] (Control) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\u0001\u1100", expected: [][]rune{{0x0001}, {0x1100}}}, // ÷ [0.2] (Control) ÷ [4.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\u0001\u0308\u1100", expected: [][]rune{{0x0001}, {0x0308}, {0x1100}}}, // ÷ [0.2] (Control) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\u0001\u1160", expected: [][]rune{{0x0001}, {0x1160}}}, // ÷ [0.2] (Control) ÷ [4.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\u0001\u0308\u1160", expected: [][]rune{{0x0001}, {0x0308}, {0x1160}}}, // ÷ [0.2] (Control) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\u0001\u11A8", expected: [][]rune{{0x0001}, {0x11A8}}}, // ÷ [0.2] (Control) ÷ [4.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\u0001\u0308\u11A8", expected: [][]rune{{0x0001}, {0x0308}, {0x11A8}}}, // ÷ [0.2] (Control) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\u0001\uAC00", expected: [][]rune{{0x0001}, {0xAC00}}}, // ÷ [0.2] (Control) ÷ [4.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\u0001\u0308\uAC00", expected: [][]rune{{0x0001}, {0x0308}, {0xAC00}}}, // ÷ [0.2] (Control) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\u0001\uAC01", expected: [][]rune{{0x0001}, {0xAC01}}}, // ÷ [0.2] (Control) ÷ [4.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\u0001\u0308\uAC01", expected: [][]rune{{0x0001}, {0x0308}, {0xAC01}}}, // ÷ [0.2] (Control) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\u0001\u231A", expected: [][]rune{{0x0001}, {0x231A}}}, // ÷ [0.2] (Control) ÷ [4.0] WATCH (ExtPict) ÷ [0.3] {original: "\u0001\u0308\u231A", expected: [][]rune{{0x0001}, {0x0308}, {0x231A}}}, // ÷ [0.2] (Control) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] WATCH (ExtPict) ÷ [0.3] {original: "\u0001\u0300", expected: [][]rune{{0x0001}, {0x0300}}}, // ÷ [0.2] (Control) ÷ [4.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\u0001\u0308\u0300", expected: [][]rune{{0x0001}, {0x0308, 0x0300}}}, // ÷ [0.2] (Control) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\u0001\u200D", expected: [][]rune{{0x0001}, {0x200D}}}, // ÷ [0.2] (Control) ÷ [4.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\u0001\u0308\u200D", expected: [][]rune{{0x0001}, {0x0308, 
0x200D}}}, // ÷ [0.2] (Control) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\u0001\u0378", expected: [][]rune{{0x0001}, {0x0378}}}, // ÷ [0.2] (Control) ÷ [4.0] (Other) ÷ [0.3] {original: "\u0001\u0308\u0378", expected: [][]rune{{0x0001}, {0x0308}, {0x0378}}}, // ÷ [0.2] (Control) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] (Other) ÷ [0.3] {original: "\u034F\u0020", expected: [][]rune{{0x034F}, {0x0020}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) ÷ [999.0] SPACE (Other) ÷ [0.3] {original: "\u034F\u0308\u0020", expected: [][]rune{{0x034F, 0x0308}, {0x0020}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] SPACE (Other) ÷ [0.3] {original: "\u034F\u000D", expected: [][]rune{{0x034F}, {0x000D}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) ÷ [5.0] (CR) ÷ [0.3] {original: "\u034F\u0308\u000D", expected: [][]rune{{0x034F, 0x0308}, {0x000D}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (CR) ÷ [0.3] {original: "\u034F\u000A", expected: [][]rune{{0x034F}, {0x000A}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) ÷ [5.0] (LF) ÷ [0.3] {original: "\u034F\u0308\u000A", expected: [][]rune{{0x034F, 0x0308}, {0x000A}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (LF) ÷ [0.3] {original: "\u034F\u0001", expected: [][]rune{{0x034F}, {0x0001}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) ÷ [5.0] (Control) ÷ [0.3] {original: "\u034F\u0308\u0001", expected: [][]rune{{0x034F, 0x0308}, {0x0001}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (Control) ÷ [0.3] {original: "\u034F\u034F", expected: [][]rune{{0x034F, 0x034F}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\u034F\u0308\u034F", expected: [][]rune{{0x034F, 0x0308, 0x034F}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\u034F\U0001F1E6", expected: [][]rune{{0x034F}, {0x1F1E6}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\u034F\u0308\U0001F1E6", expected: [][]rune{{0x034F, 0x0308}, {0x1F1E6}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\u034F\u0600", expected: [][]rune{{0x034F}, {0x0600}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\u034F\u0308\u0600", expected: [][]rune{{0x034F, 0x0308}, {0x0600}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\u034F\u0903", expected: [][]rune{{0x034F, 0x0903}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\u034F\u0308\u0903", expected: [][]rune{{0x034F, 0x0308, 0x0903}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\u034F\u1100", expected: [][]rune{{0x034F}, {0x1100}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\u034F\u0308\u1100", 
expected: [][]rune{{0x034F, 0x0308}, {0x1100}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\u034F\u1160", expected: [][]rune{{0x034F}, {0x1160}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\u034F\u0308\u1160", expected: [][]rune{{0x034F, 0x0308}, {0x1160}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\u034F\u11A8", expected: [][]rune{{0x034F}, {0x11A8}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) ÷ [999.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\u034F\u0308\u11A8", expected: [][]rune{{0x034F, 0x0308}, {0x11A8}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\u034F\uAC00", expected: [][]rune{{0x034F}, {0xAC00}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\u034F\u0308\uAC00", expected: [][]rune{{0x034F, 0x0308}, {0xAC00}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\u034F\uAC01", expected: [][]rune{{0x034F}, {0xAC01}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\u034F\u0308\uAC01", expected: [][]rune{{0x034F, 0x0308}, {0xAC01}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\u034F\u231A", expected: [][]rune{{0x034F}, {0x231A}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) ÷ [999.0] WATCH (ExtPict) ÷ [0.3] {original: "\u034F\u0308\u231A", expected: [][]rune{{0x034F, 0x0308}, {0x231A}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] WATCH (ExtPict) ÷ [0.3] {original: "\u034F\u0300", expected: [][]rune{{0x034F, 0x0300}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) × [9.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\u034F\u0308\u0300", expected: [][]rune{{0x034F, 0x0308, 0x0300}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\u034F\u200D", expected: [][]rune{{0x034F, 0x200D}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\u034F\u0308\u200D", expected: [][]rune{{0x034F, 0x0308, 0x200D}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\u034F\u0378", expected: [][]rune{{0x034F}, {0x0378}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) ÷ [999.0] (Other) ÷ [0.3] {original: "\u034F\u0308\u0378", expected: [][]rune{{0x034F, 0x0308}, {0x0378}}}, // ÷ [0.2] COMBINING GRAPHEME JOINER (Extend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] (Other) ÷ [0.3] {original: "\U0001F1E6\u0020", expected: [][]rune{{0x1F1E6}, {0x0020}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [999.0] SPACE (Other) ÷ [0.3] {original: "\U0001F1E6\u0308\u0020", expected: [][]rune{{0x1F1E6, 0x0308}, {0x0020}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] SPACE (Other) ÷ 
[0.3] {original: "\U0001F1E6\u000D", expected: [][]rune{{0x1F1E6}, {0x000D}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [5.0] (CR) ÷ [0.3] {original: "\U0001F1E6\u0308\u000D", expected: [][]rune{{0x1F1E6, 0x0308}, {0x000D}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (CR) ÷ [0.3] {original: "\U0001F1E6\u000A", expected: [][]rune{{0x1F1E6}, {0x000A}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [5.0] (LF) ÷ [0.3] {original: "\U0001F1E6\u0308\u000A", expected: [][]rune{{0x1F1E6, 0x0308}, {0x000A}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (LF) ÷ [0.3] {original: "\U0001F1E6\u0001", expected: [][]rune{{0x1F1E6}, {0x0001}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [5.0] (Control) ÷ [0.3] {original: "\U0001F1E6\u0308\u0001", expected: [][]rune{{0x1F1E6, 0x0308}, {0x0001}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (Control) ÷ [0.3] {original: "\U0001F1E6\u034F", expected: [][]rune{{0x1F1E6, 0x034F}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\U0001F1E6\u0308\u034F", expected: [][]rune{{0x1F1E6, 0x0308, 0x034F}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\U0001F1E6\U0001F1E6", expected: [][]rune{{0x1F1E6, 0x1F1E6}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [12.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\U0001F1E6\u0308\U0001F1E6", expected: [][]rune{{0x1F1E6, 0x0308}, {0x1F1E6}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\U0001F1E6\u0600", expected: [][]rune{{0x1F1E6}, {0x0600}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\U0001F1E6\u0308\u0600", expected: [][]rune{{0x1F1E6, 0x0308}, {0x0600}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\U0001F1E6\u0903", expected: [][]rune{{0x1F1E6, 0x0903}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\U0001F1E6\u0308\u0903", expected: [][]rune{{0x1F1E6, 0x0308, 0x0903}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\U0001F1E6\u1100", expected: [][]rune{{0x1F1E6}, {0x1100}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\U0001F1E6\u0308\u1100", expected: [][]rune{{0x1F1E6, 0x0308}, {0x1100}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\U0001F1E6\u1160", expected: [][]rune{{0x1F1E6}, {0x1160}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\U0001F1E6\u0308\u1160", expected: [][]rune{{0x1F1E6, 0x0308}, {0x1160}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\U0001F1E6\u11A8", 
expected: [][]rune{{0x1F1E6}, {0x11A8}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [999.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\U0001F1E6\u0308\u11A8", expected: [][]rune{{0x1F1E6, 0x0308}, {0x11A8}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\U0001F1E6\uAC00", expected: [][]rune{{0x1F1E6}, {0xAC00}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\U0001F1E6\u0308\uAC00", expected: [][]rune{{0x1F1E6, 0x0308}, {0xAC00}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\U0001F1E6\uAC01", expected: [][]rune{{0x1F1E6}, {0xAC01}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\U0001F1E6\u0308\uAC01", expected: [][]rune{{0x1F1E6, 0x0308}, {0xAC01}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\U0001F1E6\u231A", expected: [][]rune{{0x1F1E6}, {0x231A}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [999.0] WATCH (ExtPict) ÷ [0.3] {original: "\U0001F1E6\u0308\u231A", expected: [][]rune{{0x1F1E6, 0x0308}, {0x231A}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] WATCH (ExtPict) ÷ [0.3] {original: "\U0001F1E6\u0300", expected: [][]rune{{0x1F1E6, 0x0300}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [9.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\U0001F1E6\u0308\u0300", expected: [][]rune{{0x1F1E6, 0x0308, 0x0300}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\U0001F1E6\u200D", expected: [][]rune{{0x1F1E6, 0x200D}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\U0001F1E6\u0308\u200D", expected: [][]rune{{0x1F1E6, 0x0308, 0x200D}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\U0001F1E6\u0378", expected: [][]rune{{0x1F1E6}, {0x0378}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [999.0] (Other) ÷ [0.3] {original: "\U0001F1E6\u0308\u0378", expected: [][]rune{{0x1F1E6, 0x0308}, {0x0378}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] (Other) ÷ [0.3] {original: "\u0600\u0020", expected: [][]rune{{0x0600, 0x0020}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.2] SPACE (Other) ÷ [0.3] {original: "\u0600\u0308\u0020", expected: [][]rune{{0x0600, 0x0308}, {0x0020}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] SPACE (Other) ÷ [0.3] {original: "\u0600\u000D", expected: [][]rune{{0x0600}, {0x000D}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) ÷ [5.0] (CR) ÷ [0.3] {original: "\u0600\u0308\u000D", expected: [][]rune{{0x0600, 0x0308}, {0x000D}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (CR) ÷ [0.3] {original: "\u0600\u000A", expected: [][]rune{{0x0600}, {0x000A}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) ÷ [5.0] (LF) ÷ [0.3] {original: "\u0600\u0308\u000A", expected: 
[][]rune{{0x0600, 0x0308}, {0x000A}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (LF) ÷ [0.3] {original: "\u0600\u0001", expected: [][]rune{{0x0600}, {0x0001}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) ÷ [5.0] (Control) ÷ [0.3] {original: "\u0600\u0308\u0001", expected: [][]rune{{0x0600, 0x0308}, {0x0001}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (Control) ÷ [0.3] {original: "\u0600\u034F", expected: [][]rune{{0x0600, 0x034F}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\u0600\u0308\u034F", expected: [][]rune{{0x0600, 0x0308, 0x034F}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\u0600\U0001F1E6", expected: [][]rune{{0x0600, 0x1F1E6}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\u0600\u0308\U0001F1E6", expected: [][]rune{{0x0600, 0x0308}, {0x1F1E6}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\u0600\u0600", expected: [][]rune{{0x0600, 0x0600}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.2] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\u0600\u0308\u0600", expected: [][]rune{{0x0600, 0x0308}, {0x0600}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\u0600\u0903", expected: [][]rune{{0x0600, 0x0903}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\u0600\u0308\u0903", expected: [][]rune{{0x0600, 0x0308, 0x0903}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\u0600\u1100", expected: [][]rune{{0x0600, 0x1100}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.2] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\u0600\u0308\u1100", expected: [][]rune{{0x0600, 0x0308}, {0x1100}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\u0600\u1160", expected: [][]rune{{0x0600, 0x1160}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.2] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\u0600\u0308\u1160", expected: [][]rune{{0x0600, 0x0308}, {0x1160}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\u0600\u11A8", expected: [][]rune{{0x0600, 0x11A8}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.2] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\u0600\u0308\u11A8", expected: [][]rune{{0x0600, 0x0308}, {0x11A8}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\u0600\uAC00", expected: [][]rune{{0x0600, 0xAC00}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.2] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\u0600\u0308\uAC00", expected: [][]rune{{0x0600, 0x0308}, {0xAC00}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\u0600\uAC01", expected: [][]rune{{0x0600, 0xAC01}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) 
× [9.2] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\u0600\u0308\uAC01", expected: [][]rune{{0x0600, 0x0308}, {0xAC01}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\u0600\u231A", expected: [][]rune{{0x0600, 0x231A}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.2] WATCH (ExtPict) ÷ [0.3] {original: "\u0600\u0308\u231A", expected: [][]rune{{0x0600, 0x0308}, {0x231A}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] WATCH (ExtPict) ÷ [0.3] {original: "\u0600\u0300", expected: [][]rune{{0x0600, 0x0300}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\u0600\u0308\u0300", expected: [][]rune{{0x0600, 0x0308, 0x0300}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\u0600\u200D", expected: [][]rune{{0x0600, 0x200D}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\u0600\u0308\u200D", expected: [][]rune{{0x0600, 0x0308, 0x200D}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\u0600\u0378", expected: [][]rune{{0x0600, 0x0378}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.2] (Other) ÷ [0.3] {original: "\u0600\u0308\u0378", expected: [][]rune{{0x0600, 0x0308}, {0x0378}}}, // ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] (Other) ÷ [0.3] {original: "\u0903\u0020", expected: [][]rune{{0x0903}, {0x0020}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [999.0] SPACE (Other) ÷ [0.3] {original: "\u0903\u0308\u0020", expected: [][]rune{{0x0903, 0x0308}, {0x0020}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] SPACE (Other) ÷ [0.3] {original: "\u0903\u000D", expected: [][]rune{{0x0903}, {0x000D}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [5.0] (CR) ÷ [0.3] {original: "\u0903\u0308\u000D", expected: [][]rune{{0x0903, 0x0308}, {0x000D}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (CR) ÷ [0.3] {original: "\u0903\u000A", expected: [][]rune{{0x0903}, {0x000A}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [5.0] (LF) ÷ [0.3] {original: "\u0903\u0308\u000A", expected: [][]rune{{0x0903, 0x0308}, {0x000A}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (LF) ÷ [0.3] {original: "\u0903\u0001", expected: [][]rune{{0x0903}, {0x0001}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [5.0] (Control) ÷ [0.3] {original: "\u0903\u0308\u0001", expected: [][]rune{{0x0903, 0x0308}, {0x0001}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (Control) ÷ [0.3] {original: "\u0903\u034F", expected: [][]rune{{0x0903, 0x034F}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\u0903\u0308\u034F", expected: [][]rune{{0x0903, 0x0308, 0x034F}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\u0903\U0001F1E6", expected: [][]rune{{0x0903}, {0x1F1E6}}}, // ÷ [0.2] DEVANAGARI 
SIGN VISARGA (SpacingMark) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\u0903\u0308\U0001F1E6", expected: [][]rune{{0x0903, 0x0308}, {0x1F1E6}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\u0903\u0600", expected: [][]rune{{0x0903}, {0x0600}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\u0903\u0308\u0600", expected: [][]rune{{0x0903, 0x0308}, {0x0600}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\u0903\u0903", expected: [][]rune{{0x0903, 0x0903}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\u0903\u0308\u0903", expected: [][]rune{{0x0903, 0x0308, 0x0903}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\u0903\u1100", expected: [][]rune{{0x0903}, {0x1100}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\u0903\u0308\u1100", expected: [][]rune{{0x0903, 0x0308}, {0x1100}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\u0903\u1160", expected: [][]rune{{0x0903}, {0x1160}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\u0903\u0308\u1160", expected: [][]rune{{0x0903, 0x0308}, {0x1160}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\u0903\u11A8", expected: [][]rune{{0x0903}, {0x11A8}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [999.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\u0903\u0308\u11A8", expected: [][]rune{{0x0903, 0x0308}, {0x11A8}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\u0903\uAC00", expected: [][]rune{{0x0903}, {0xAC00}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\u0903\u0308\uAC00", expected: [][]rune{{0x0903, 0x0308}, {0xAC00}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\u0903\uAC01", expected: [][]rune{{0x0903}, {0xAC01}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\u0903\u0308\uAC01", expected: [][]rune{{0x0903, 0x0308}, {0xAC01}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\u0903\u231A", expected: [][]rune{{0x0903}, {0x231A}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [999.0] WATCH (ExtPict) ÷ [0.3] {original: "\u0903\u0308\u231A", expected: [][]rune{{0x0903, 0x0308}, {0x231A}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] WATCH (ExtPict) ÷ [0.3] {original: "\u0903\u0300", expected: [][]rune{{0x0903, 0x0300}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) × [9.0] COMBINING GRAVE ACCENT 
(Extend_ExtCccZwj) ÷ [0.3] {original: "\u0903\u0308\u0300", expected: [][]rune{{0x0903, 0x0308, 0x0300}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\u0903\u200D", expected: [][]rune{{0x0903, 0x200D}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\u0903\u0308\u200D", expected: [][]rune{{0x0903, 0x0308, 0x200D}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\u0903\u0378", expected: [][]rune{{0x0903}, {0x0378}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [999.0] (Other) ÷ [0.3] {original: "\u0903\u0308\u0378", expected: [][]rune{{0x0903, 0x0308}, {0x0378}}}, // ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] (Other) ÷ [0.3] {original: "\u1100\u0020", expected: [][]rune{{0x1100}, {0x0020}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) ÷ [999.0] SPACE (Other) ÷ [0.3] {original: "\u1100\u0308\u0020", expected: [][]rune{{0x1100, 0x0308}, {0x0020}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] SPACE (Other) ÷ [0.3] {original: "\u1100\u000D", expected: [][]rune{{0x1100}, {0x000D}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) ÷ [5.0] (CR) ÷ [0.3] {original: "\u1100\u0308\u000D", expected: [][]rune{{0x1100, 0x0308}, {0x000D}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (CR) ÷ [0.3] {original: "\u1100\u000A", expected: [][]rune{{0x1100}, {0x000A}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) ÷ [5.0] (LF) ÷ [0.3] {original: "\u1100\u0308\u000A", expected: [][]rune{{0x1100, 0x0308}, {0x000A}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (LF) ÷ [0.3] {original: "\u1100\u0001", expected: [][]rune{{0x1100}, {0x0001}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) ÷ [5.0] (Control) ÷ [0.3] {original: "\u1100\u0308\u0001", expected: [][]rune{{0x1100, 0x0308}, {0x0001}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (Control) ÷ [0.3] {original: "\u1100\u034F", expected: [][]rune{{0x1100, 0x034F}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\u1100\u0308\u034F", expected: [][]rune{{0x1100, 0x0308, 0x034F}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\u1100\U0001F1E6", expected: [][]rune{{0x1100}, {0x1F1E6}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\u1100\u0308\U0001F1E6", expected: [][]rune{{0x1100, 0x0308}, {0x1F1E6}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\u1100\u0600", expected: [][]rune{{0x1100}, {0x0600}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\u1100\u0308\u0600", expected: [][]rune{{0x1100, 0x0308}, {0x0600}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\u1100\u0903", expected: [][]rune{{0x1100, 0x0903}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [9.1] DEVANAGARI 
SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\u1100\u0308\u0903", expected: [][]rune{{0x1100, 0x0308, 0x0903}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\u1100\u1100", expected: [][]rune{{0x1100, 0x1100}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [6.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\u1100\u0308\u1100", expected: [][]rune{{0x1100, 0x0308}, {0x1100}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\u1100\u1160", expected: [][]rune{{0x1100, 0x1160}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [6.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\u1100\u0308\u1160", expected: [][]rune{{0x1100, 0x0308}, {0x1160}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\u1100\u11A8", expected: [][]rune{{0x1100}, {0x11A8}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) ÷ [999.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\u1100\u0308\u11A8", expected: [][]rune{{0x1100, 0x0308}, {0x11A8}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\u1100\uAC00", expected: [][]rune{{0x1100, 0xAC00}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [6.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\u1100\u0308\uAC00", expected: [][]rune{{0x1100, 0x0308}, {0xAC00}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\u1100\uAC01", expected: [][]rune{{0x1100, 0xAC01}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [6.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\u1100\u0308\uAC01", expected: [][]rune{{0x1100, 0x0308}, {0xAC01}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\u1100\u231A", expected: [][]rune{{0x1100}, {0x231A}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) ÷ [999.0] WATCH (ExtPict) ÷ [0.3] {original: "\u1100\u0308\u231A", expected: [][]rune{{0x1100, 0x0308}, {0x231A}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] WATCH (ExtPict) ÷ [0.3] {original: "\u1100\u0300", expected: [][]rune{{0x1100, 0x0300}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [9.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\u1100\u0308\u0300", expected: [][]rune{{0x1100, 0x0308, 0x0300}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\u1100\u200D", expected: [][]rune{{0x1100, 0x200D}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\u1100\u0308\u200D", expected: [][]rune{{0x1100, 0x0308, 0x200D}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\u1100\u0378", expected: [][]rune{{0x1100}, {0x0378}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) ÷ [999.0] (Other) ÷ [0.3] {original: "\u1100\u0308\u0378", expected: [][]rune{{0x1100, 0x0308}, {0x0378}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] (Other) ÷ [0.3] {original: "\u1160\u0020", expected: [][]rune{{0x1160}, {0x0020}}}, // ÷ [0.2] HANGUL 
JUNGSEONG FILLER (V) ÷ [999.0] SPACE (Other) ÷ [0.3] {original: "\u1160\u0308\u0020", expected: [][]rune{{0x1160, 0x0308}, {0x0020}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] SPACE (Other) ÷ [0.3] {original: "\u1160\u000D", expected: [][]rune{{0x1160}, {0x000D}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) ÷ [5.0] (CR) ÷ [0.3] {original: "\u1160\u0308\u000D", expected: [][]rune{{0x1160, 0x0308}, {0x000D}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (CR) ÷ [0.3] {original: "\u1160\u000A", expected: [][]rune{{0x1160}, {0x000A}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) ÷ [5.0] (LF) ÷ [0.3] {original: "\u1160\u0308\u000A", expected: [][]rune{{0x1160, 0x0308}, {0x000A}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (LF) ÷ [0.3] {original: "\u1160\u0001", expected: [][]rune{{0x1160}, {0x0001}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) ÷ [5.0] (Control) ÷ [0.3] {original: "\u1160\u0308\u0001", expected: [][]rune{{0x1160, 0x0308}, {0x0001}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (Control) ÷ [0.3] {original: "\u1160\u034F", expected: [][]rune{{0x1160, 0x034F}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\u1160\u0308\u034F", expected: [][]rune{{0x1160, 0x0308, 0x034F}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\u1160\U0001F1E6", expected: [][]rune{{0x1160}, {0x1F1E6}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\u1160\u0308\U0001F1E6", expected: [][]rune{{0x1160, 0x0308}, {0x1F1E6}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\u1160\u0600", expected: [][]rune{{0x1160}, {0x0600}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\u1160\u0308\u0600", expected: [][]rune{{0x1160, 0x0308}, {0x0600}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\u1160\u0903", expected: [][]rune{{0x1160, 0x0903}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\u1160\u0308\u0903", expected: [][]rune{{0x1160, 0x0308, 0x0903}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\u1160\u1100", expected: [][]rune{{0x1160}, {0x1100}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\u1160\u0308\u1100", expected: [][]rune{{0x1160, 0x0308}, {0x1100}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\u1160\u1160", expected: [][]rune{{0x1160, 0x1160}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) × [7.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\u1160\u0308\u1160", expected: [][]rune{{0x1160, 0x0308}, {0x1160}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\u1160\u11A8", expected: [][]rune{{0x1160, 0x11A8}}}, // ÷ [0.2] HANGUL 
JUNGSEONG FILLER (V) × [7.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\u1160\u0308\u11A8", expected: [][]rune{{0x1160, 0x0308}, {0x11A8}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\u1160\uAC00", expected: [][]rune{{0x1160}, {0xAC00}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\u1160\u0308\uAC00", expected: [][]rune{{0x1160, 0x0308}, {0xAC00}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\u1160\uAC01", expected: [][]rune{{0x1160}, {0xAC01}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\u1160\u0308\uAC01", expected: [][]rune{{0x1160, 0x0308}, {0xAC01}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\u1160\u231A", expected: [][]rune{{0x1160}, {0x231A}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) ÷ [999.0] WATCH (ExtPict) ÷ [0.3] {original: "\u1160\u0308\u231A", expected: [][]rune{{0x1160, 0x0308}, {0x231A}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] WATCH (ExtPict) ÷ [0.3] {original: "\u1160\u0300", expected: [][]rune{{0x1160, 0x0300}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) × [9.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\u1160\u0308\u0300", expected: [][]rune{{0x1160, 0x0308, 0x0300}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\u1160\u200D", expected: [][]rune{{0x1160, 0x200D}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\u1160\u0308\u200D", expected: [][]rune{{0x1160, 0x0308, 0x200D}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\u1160\u0378", expected: [][]rune{{0x1160}, {0x0378}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) ÷ [999.0] (Other) ÷ [0.3] {original: "\u1160\u0308\u0378", expected: [][]rune{{0x1160, 0x0308}, {0x0378}}}, // ÷ [0.2] HANGUL JUNGSEONG FILLER (V) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] (Other) ÷ [0.3] {original: "\u11A8\u0020", expected: [][]rune{{0x11A8}, {0x0020}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) ÷ [999.0] SPACE (Other) ÷ [0.3] {original: "\u11A8\u0308\u0020", expected: [][]rune{{0x11A8, 0x0308}, {0x0020}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] SPACE (Other) ÷ [0.3] {original: "\u11A8\u000D", expected: [][]rune{{0x11A8}, {0x000D}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) ÷ [5.0] (CR) ÷ [0.3] {original: "\u11A8\u0308\u000D", expected: [][]rune{{0x11A8, 0x0308}, {0x000D}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (CR) ÷ [0.3] {original: "\u11A8\u000A", expected: [][]rune{{0x11A8}, {0x000A}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) ÷ [5.0] (LF) ÷ [0.3] {original: "\u11A8\u0308\u000A", expected: [][]rune{{0x11A8, 0x0308}, {0x000A}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (LF) ÷ [0.3] {original: "\u11A8\u0001", expected: [][]rune{{0x11A8}, {0x0001}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) ÷ [5.0] (Control) ÷ [0.3] {original: 
"\u11A8\u0308\u0001", expected: [][]rune{{0x11A8, 0x0308}, {0x0001}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (Control) ÷ [0.3] {original: "\u11A8\u034F", expected: [][]rune{{0x11A8, 0x034F}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\u11A8\u0308\u034F", expected: [][]rune{{0x11A8, 0x0308, 0x034F}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\u11A8\U0001F1E6", expected: [][]rune{{0x11A8}, {0x1F1E6}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\u11A8\u0308\U0001F1E6", expected: [][]rune{{0x11A8, 0x0308}, {0x1F1E6}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\u11A8\u0600", expected: [][]rune{{0x11A8}, {0x0600}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\u11A8\u0308\u0600", expected: [][]rune{{0x11A8, 0x0308}, {0x0600}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\u11A8\u0903", expected: [][]rune{{0x11A8, 0x0903}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\u11A8\u0308\u0903", expected: [][]rune{{0x11A8, 0x0308, 0x0903}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\u11A8\u1100", expected: [][]rune{{0x11A8}, {0x1100}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\u11A8\u0308\u1100", expected: [][]rune{{0x11A8, 0x0308}, {0x1100}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\u11A8\u1160", expected: [][]rune{{0x11A8}, {0x1160}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\u11A8\u0308\u1160", expected: [][]rune{{0x11A8, 0x0308}, {0x1160}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\u11A8\u11A8", expected: [][]rune{{0x11A8, 0x11A8}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) × [8.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\u11A8\u0308\u11A8", expected: [][]rune{{0x11A8, 0x0308}, {0x11A8}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\u11A8\uAC00", expected: [][]rune{{0x11A8}, {0xAC00}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\u11A8\u0308\uAC00", expected: [][]rune{{0x11A8, 0x0308}, {0xAC00}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\u11A8\uAC01", expected: [][]rune{{0x11A8}, {0xAC01}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\u11A8\u0308\uAC01", expected: [][]rune{{0x11A8, 0x0308}, {0xAC01}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\u11A8\u231A", 
expected: [][]rune{{0x11A8}, {0x231A}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) ÷ [999.0] WATCH (ExtPict) ÷ [0.3] {original: "\u11A8\u0308\u231A", expected: [][]rune{{0x11A8, 0x0308}, {0x231A}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] WATCH (ExtPict) ÷ [0.3] {original: "\u11A8\u0300", expected: [][]rune{{0x11A8, 0x0300}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) × [9.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\u11A8\u0308\u0300", expected: [][]rune{{0x11A8, 0x0308, 0x0300}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\u11A8\u200D", expected: [][]rune{{0x11A8, 0x200D}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\u11A8\u0308\u200D", expected: [][]rune{{0x11A8, 0x0308, 0x200D}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\u11A8\u0378", expected: [][]rune{{0x11A8}, {0x0378}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) ÷ [999.0] (Other) ÷ [0.3] {original: "\u11A8\u0308\u0378", expected: [][]rune{{0x11A8, 0x0308}, {0x0378}}}, // ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] (Other) ÷ [0.3] {original: "\uAC00\u0020", expected: [][]rune{{0xAC00}, {0x0020}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) ÷ [999.0] SPACE (Other) ÷ [0.3] {original: "\uAC00\u0308\u0020", expected: [][]rune{{0xAC00, 0x0308}, {0x0020}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] SPACE (Other) ÷ [0.3] {original: "\uAC00\u000D", expected: [][]rune{{0xAC00}, {0x000D}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) ÷ [5.0] (CR) ÷ [0.3] {original: "\uAC00\u0308\u000D", expected: [][]rune{{0xAC00, 0x0308}, {0x000D}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (CR) ÷ [0.3] {original: "\uAC00\u000A", expected: [][]rune{{0xAC00}, {0x000A}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) ÷ [5.0] (LF) ÷ [0.3] {original: "\uAC00\u0308\u000A", expected: [][]rune{{0xAC00, 0x0308}, {0x000A}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (LF) ÷ [0.3] {original: "\uAC00\u0001", expected: [][]rune{{0xAC00}, {0x0001}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) ÷ [5.0] (Control) ÷ [0.3] {original: "\uAC00\u0308\u0001", expected: [][]rune{{0xAC00, 0x0308}, {0x0001}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (Control) ÷ [0.3] {original: "\uAC00\u034F", expected: [][]rune{{0xAC00, 0x034F}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\uAC00\u0308\u034F", expected: [][]rune{{0xAC00, 0x0308, 0x034F}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\uAC00\U0001F1E6", expected: [][]rune{{0xAC00}, {0x1F1E6}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\uAC00\u0308\U0001F1E6", expected: [][]rune{{0xAC00, 0x0308}, {0x1F1E6}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\uAC00\u0600", expected: [][]rune{{0xAC00}, {0x0600}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) ÷ [999.0] ARABIC NUMBER 
SIGN (Prepend) ÷ [0.3] {original: "\uAC00\u0308\u0600", expected: [][]rune{{0xAC00, 0x0308}, {0x0600}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\uAC00\u0903", expected: [][]rune{{0xAC00, 0x0903}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\uAC00\u0308\u0903", expected: [][]rune{{0xAC00, 0x0308, 0x0903}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\uAC00\u1100", expected: [][]rune{{0xAC00}, {0x1100}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\uAC00\u0308\u1100", expected: [][]rune{{0xAC00, 0x0308}, {0x1100}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\uAC00\u1160", expected: [][]rune{{0xAC00, 0x1160}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) × [7.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\uAC00\u0308\u1160", expected: [][]rune{{0xAC00, 0x0308}, {0x1160}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\uAC00\u11A8", expected: [][]rune{{0xAC00, 0x11A8}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) × [7.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\uAC00\u0308\u11A8", expected: [][]rune{{0xAC00, 0x0308}, {0x11A8}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\uAC00\uAC00", expected: [][]rune{{0xAC00}, {0xAC00}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\uAC00\u0308\uAC00", expected: [][]rune{{0xAC00, 0x0308}, {0xAC00}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\uAC00\uAC01", expected: [][]rune{{0xAC00}, {0xAC01}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\uAC00\u0308\uAC01", expected: [][]rune{{0xAC00, 0x0308}, {0xAC01}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\uAC00\u231A", expected: [][]rune{{0xAC00}, {0x231A}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) ÷ [999.0] WATCH (ExtPict) ÷ [0.3] {original: "\uAC00\u0308\u231A", expected: [][]rune{{0xAC00, 0x0308}, {0x231A}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] WATCH (ExtPict) ÷ [0.3] {original: "\uAC00\u0300", expected: [][]rune{{0xAC00, 0x0300}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) × [9.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\uAC00\u0308\u0300", expected: [][]rune{{0xAC00, 0x0308, 0x0300}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\uAC00\u200D", expected: [][]rune{{0xAC00, 0x200D}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\uAC00\u0308\u200D", expected: [][]rune{{0xAC00, 0x0308, 0x200D}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\uAC00\u0378", expected: [][]rune{{0xAC00}, {0x0378}}}, // ÷ [0.2] HANGUL SYLLABLE GA 
(LV) ÷ [999.0] (Other) ÷ [0.3] {original: "\uAC00\u0308\u0378", expected: [][]rune{{0xAC00, 0x0308}, {0x0378}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] (Other) ÷ [0.3] {original: "\uAC01\u0020", expected: [][]rune{{0xAC01}, {0x0020}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) ÷ [999.0] SPACE (Other) ÷ [0.3] {original: "\uAC01\u0308\u0020", expected: [][]rune{{0xAC01, 0x0308}, {0x0020}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] SPACE (Other) ÷ [0.3] {original: "\uAC01\u000D", expected: [][]rune{{0xAC01}, {0x000D}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) ÷ [5.0] (CR) ÷ [0.3] {original: "\uAC01\u0308\u000D", expected: [][]rune{{0xAC01, 0x0308}, {0x000D}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (CR) ÷ [0.3] {original: "\uAC01\u000A", expected: [][]rune{{0xAC01}, {0x000A}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) ÷ [5.0] (LF) ÷ [0.3] {original: "\uAC01\u0308\u000A", expected: [][]rune{{0xAC01, 0x0308}, {0x000A}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (LF) ÷ [0.3] {original: "\uAC01\u0001", expected: [][]rune{{0xAC01}, {0x0001}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) ÷ [5.0] (Control) ÷ [0.3] {original: "\uAC01\u0308\u0001", expected: [][]rune{{0xAC01, 0x0308}, {0x0001}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (Control) ÷ [0.3] {original: "\uAC01\u034F", expected: [][]rune{{0xAC01, 0x034F}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\uAC01\u0308\u034F", expected: [][]rune{{0xAC01, 0x0308, 0x034F}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\uAC01\U0001F1E6", expected: [][]rune{{0xAC01}, {0x1F1E6}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\uAC01\u0308\U0001F1E6", expected: [][]rune{{0xAC01, 0x0308}, {0x1F1E6}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\uAC01\u0600", expected: [][]rune{{0xAC01}, {0x0600}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\uAC01\u0308\u0600", expected: [][]rune{{0xAC01, 0x0308}, {0x0600}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\uAC01\u0903", expected: [][]rune{{0xAC01, 0x0903}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\uAC01\u0308\u0903", expected: [][]rune{{0xAC01, 0x0308, 0x0903}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\uAC01\u1100", expected: [][]rune{{0xAC01}, {0x1100}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\uAC01\u0308\u1100", expected: [][]rune{{0xAC01, 0x0308}, {0x1100}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\uAC01\u1160", expected: [][]rune{{0xAC01}, {0x1160}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: 
"\uAC01\u0308\u1160", expected: [][]rune{{0xAC01, 0x0308}, {0x1160}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\uAC01\u11A8", expected: [][]rune{{0xAC01, 0x11A8}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) × [8.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\uAC01\u0308\u11A8", expected: [][]rune{{0xAC01, 0x0308}, {0x11A8}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\uAC01\uAC00", expected: [][]rune{{0xAC01}, {0xAC00}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\uAC01\u0308\uAC00", expected: [][]rune{{0xAC01, 0x0308}, {0xAC00}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\uAC01\uAC01", expected: [][]rune{{0xAC01}, {0xAC01}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\uAC01\u0308\uAC01", expected: [][]rune{{0xAC01, 0x0308}, {0xAC01}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\uAC01\u231A", expected: [][]rune{{0xAC01}, {0x231A}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) ÷ [999.0] WATCH (ExtPict) ÷ [0.3] {original: "\uAC01\u0308\u231A", expected: [][]rune{{0xAC01, 0x0308}, {0x231A}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] WATCH (ExtPict) ÷ [0.3] {original: "\uAC01\u0300", expected: [][]rune{{0xAC01, 0x0300}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) × [9.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\uAC01\u0308\u0300", expected: [][]rune{{0xAC01, 0x0308, 0x0300}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\uAC01\u200D", expected: [][]rune{{0xAC01, 0x200D}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\uAC01\u0308\u200D", expected: [][]rune{{0xAC01, 0x0308, 0x200D}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\uAC01\u0378", expected: [][]rune{{0xAC01}, {0x0378}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) ÷ [999.0] (Other) ÷ [0.3] {original: "\uAC01\u0308\u0378", expected: [][]rune{{0xAC01, 0x0308}, {0x0378}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] (Other) ÷ [0.3] {original: "\u231A\u0020", expected: [][]rune{{0x231A}, {0x0020}}}, // ÷ [0.2] WATCH (ExtPict) ÷ [999.0] SPACE (Other) ÷ [0.3] {original: "\u231A\u0308\u0020", expected: [][]rune{{0x231A, 0x0308}, {0x0020}}}, // ÷ [0.2] WATCH (ExtPict) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] SPACE (Other) ÷ [0.3] {original: "\u231A\u000D", expected: [][]rune{{0x231A}, {0x000D}}}, // ÷ [0.2] WATCH (ExtPict) ÷ [5.0] (CR) ÷ [0.3] {original: "\u231A\u0308\u000D", expected: [][]rune{{0x231A, 0x0308}, {0x000D}}}, // ÷ [0.2] WATCH (ExtPict) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (CR) ÷ [0.3] {original: "\u231A\u000A", expected: [][]rune{{0x231A}, {0x000A}}}, // ÷ [0.2] WATCH (ExtPict) ÷ [5.0] (LF) ÷ [0.3] {original: "\u231A\u0308\u000A", expected: [][]rune{{0x231A, 0x0308}, {0x000A}}}, // ÷ [0.2] WATCH (ExtPict) × [9.0] COMBINING DIAERESIS 
(Extend_ExtCccZwj) ÷ [5.0] (LF) ÷ [0.3] {original: "\u231A\u0001", expected: [][]rune{{0x231A}, {0x0001}}}, // ÷ [0.2] WATCH (ExtPict) ÷ [5.0] (Control) ÷ [0.3] {original: "\u231A\u0308\u0001", expected: [][]rune{{0x231A, 0x0308}, {0x0001}}}, // ÷ [0.2] WATCH (ExtPict) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (Control) ÷ [0.3] {original: "\u231A\u034F", expected: [][]rune{{0x231A, 0x034F}}}, // ÷ [0.2] WATCH (ExtPict) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\u231A\u0308\u034F", expected: [][]rune{{0x231A, 0x0308, 0x034F}}}, // ÷ [0.2] WATCH (ExtPict) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\u231A\U0001F1E6", expected: [][]rune{{0x231A}, {0x1F1E6}}}, // ÷ [0.2] WATCH (ExtPict) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\u231A\u0308\U0001F1E6", expected: [][]rune{{0x231A, 0x0308}, {0x1F1E6}}}, // ÷ [0.2] WATCH (ExtPict) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\u231A\u0600", expected: [][]rune{{0x231A}, {0x0600}}}, // ÷ [0.2] WATCH (ExtPict) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\u231A\u0308\u0600", expected: [][]rune{{0x231A, 0x0308}, {0x0600}}}, // ÷ [0.2] WATCH (ExtPict) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\u231A\u0903", expected: [][]rune{{0x231A, 0x0903}}}, // ÷ [0.2] WATCH (ExtPict) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\u231A\u0308\u0903", expected: [][]rune{{0x231A, 0x0308, 0x0903}}}, // ÷ [0.2] WATCH (ExtPict) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\u231A\u1100", expected: [][]rune{{0x231A}, {0x1100}}}, // ÷ [0.2] WATCH (ExtPict) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\u231A\u0308\u1100", expected: [][]rune{{0x231A, 0x0308}, {0x1100}}}, // ÷ [0.2] WATCH (ExtPict) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\u231A\u1160", expected: [][]rune{{0x231A}, {0x1160}}}, // ÷ [0.2] WATCH (ExtPict) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\u231A\u0308\u1160", expected: [][]rune{{0x231A, 0x0308}, {0x1160}}}, // ÷ [0.2] WATCH (ExtPict) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\u231A\u11A8", expected: [][]rune{{0x231A}, {0x11A8}}}, // ÷ [0.2] WATCH (ExtPict) ÷ [999.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\u231A\u0308\u11A8", expected: [][]rune{{0x231A, 0x0308}, {0x11A8}}}, // ÷ [0.2] WATCH (ExtPict) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\u231A\uAC00", expected: [][]rune{{0x231A}, {0xAC00}}}, // ÷ [0.2] WATCH (ExtPict) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\u231A\u0308\uAC00", expected: [][]rune{{0x231A, 0x0308}, {0xAC00}}}, // ÷ [0.2] WATCH (ExtPict) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\u231A\uAC01", expected: [][]rune{{0x231A}, {0xAC01}}}, // ÷ [0.2] WATCH (ExtPict) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\u231A\u0308\uAC01", expected: [][]rune{{0x231A, 0x0308}, {0xAC01}}}, // ÷ [0.2] WATCH (ExtPict) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\u231A\u231A", expected: [][]rune{{0x231A}, {0x231A}}}, // ÷ [0.2] WATCH 
(ExtPict) ÷ [999.0] WATCH (ExtPict) ÷ [0.3] {original: "\u231A\u0308\u231A", expected: [][]rune{{0x231A, 0x0308}, {0x231A}}}, // ÷ [0.2] WATCH (ExtPict) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] WATCH (ExtPict) ÷ [0.3] {original: "\u231A\u0300", expected: [][]rune{{0x231A, 0x0300}}}, // ÷ [0.2] WATCH (ExtPict) × [9.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\u231A\u0308\u0300", expected: [][]rune{{0x231A, 0x0308, 0x0300}}}, // ÷ [0.2] WATCH (ExtPict) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\u231A\u200D", expected: [][]rune{{0x231A, 0x200D}}}, // ÷ [0.2] WATCH (ExtPict) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\u231A\u0308\u200D", expected: [][]rune{{0x231A, 0x0308, 0x200D}}}, // ÷ [0.2] WATCH (ExtPict) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\u231A\u0378", expected: [][]rune{{0x231A}, {0x0378}}}, // ÷ [0.2] WATCH (ExtPict) ÷ [999.0] (Other) ÷ [0.3] {original: "\u231A\u0308\u0378", expected: [][]rune{{0x231A, 0x0308}, {0x0378}}}, // ÷ [0.2] WATCH (ExtPict) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] (Other) ÷ [0.3] {original: "\u0300\u0020", expected: [][]rune{{0x0300}, {0x0020}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [999.0] SPACE (Other) ÷ [0.3] {original: "\u0300\u0308\u0020", expected: [][]rune{{0x0300, 0x0308}, {0x0020}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] SPACE (Other) ÷ [0.3] {original: "\u0300\u000D", expected: [][]rune{{0x0300}, {0x000D}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [5.0] (CR) ÷ [0.3] {original: "\u0300\u0308\u000D", expected: [][]rune{{0x0300, 0x0308}, {0x000D}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (CR) ÷ [0.3] {original: "\u0300\u000A", expected: [][]rune{{0x0300}, {0x000A}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [5.0] (LF) ÷ [0.3] {original: "\u0300\u0308\u000A", expected: [][]rune{{0x0300, 0x0308}, {0x000A}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (LF) ÷ [0.3] {original: "\u0300\u0001", expected: [][]rune{{0x0300}, {0x0001}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [5.0] (Control) ÷ [0.3] {original: "\u0300\u0308\u0001", expected: [][]rune{{0x0300, 0x0308}, {0x0001}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (Control) ÷ [0.3] {original: "\u0300\u034F", expected: [][]rune{{0x0300, 0x034F}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\u0300\u0308\u034F", expected: [][]rune{{0x0300, 0x0308, 0x034F}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\u0300\U0001F1E6", expected: [][]rune{{0x0300}, {0x1F1E6}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\u0300\u0308\U0001F1E6", expected: [][]rune{{0x0300, 0x0308}, {0x1F1E6}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\u0300\u0600", expected: [][]rune{{0x0300}, 
{0x0600}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\u0300\u0308\u0600", expected: [][]rune{{0x0300, 0x0308}, {0x0600}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\u0300\u0903", expected: [][]rune{{0x0300, 0x0903}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\u0300\u0308\u0903", expected: [][]rune{{0x0300, 0x0308, 0x0903}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\u0300\u1100", expected: [][]rune{{0x0300}, {0x1100}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\u0300\u0308\u1100", expected: [][]rune{{0x0300, 0x0308}, {0x1100}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\u0300\u1160", expected: [][]rune{{0x0300}, {0x1160}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\u0300\u0308\u1160", expected: [][]rune{{0x0300, 0x0308}, {0x1160}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\u0300\u11A8", expected: [][]rune{{0x0300}, {0x11A8}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [999.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\u0300\u0308\u11A8", expected: [][]rune{{0x0300, 0x0308}, {0x11A8}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\u0300\uAC00", expected: [][]rune{{0x0300}, {0xAC00}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\u0300\u0308\uAC00", expected: [][]rune{{0x0300, 0x0308}, {0xAC00}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\u0300\uAC01", expected: [][]rune{{0x0300}, {0xAC01}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\u0300\u0308\uAC01", expected: [][]rune{{0x0300, 0x0308}, {0xAC01}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\u0300\u231A", expected: [][]rune{{0x0300}, {0x231A}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [999.0] WATCH (ExtPict) ÷ [0.3] {original: "\u0300\u0308\u231A", expected: [][]rune{{0x0300, 0x0308}, {0x231A}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] WATCH (ExtPict) ÷ [0.3] {original: "\u0300\u0300", expected: [][]rune{{0x0300, 0x0300}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) × [9.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\u0300\u0308\u0300", expected: [][]rune{{0x0300, 0x0308, 0x0300}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\u0300\u200D", expected: [][]rune{{0x0300, 
0x200D}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\u0300\u0308\u200D", expected: [][]rune{{0x0300, 0x0308, 0x200D}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\u0300\u0378", expected: [][]rune{{0x0300}, {0x0378}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [999.0] (Other) ÷ [0.3] {original: "\u0300\u0308\u0378", expected: [][]rune{{0x0300, 0x0308}, {0x0378}}}, // ÷ [0.2] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] (Other) ÷ [0.3] {original: "\u200D\u0020", expected: [][]rune{{0x200D}, {0x0020}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [999.0] SPACE (Other) ÷ [0.3] {original: "\u200D\u0308\u0020", expected: [][]rune{{0x200D, 0x0308}, {0x0020}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] SPACE (Other) ÷ [0.3] {original: "\u200D\u000D", expected: [][]rune{{0x200D}, {0x000D}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [5.0] (CR) ÷ [0.3] {original: "\u200D\u0308\u000D", expected: [][]rune{{0x200D, 0x0308}, {0x000D}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (CR) ÷ [0.3] {original: "\u200D\u000A", expected: [][]rune{{0x200D}, {0x000A}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [5.0] (LF) ÷ [0.3] {original: "\u200D\u0308\u000A", expected: [][]rune{{0x200D, 0x0308}, {0x000A}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (LF) ÷ [0.3] {original: "\u200D\u0001", expected: [][]rune{{0x200D}, {0x0001}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [5.0] (Control) ÷ [0.3] {original: "\u200D\u0308\u0001", expected: [][]rune{{0x200D, 0x0308}, {0x0001}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (Control) ÷ [0.3] {original: "\u200D\u034F", expected: [][]rune{{0x200D, 0x034F}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\u200D\u0308\u034F", expected: [][]rune{{0x200D, 0x0308, 0x034F}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\u200D\U0001F1E6", expected: [][]rune{{0x200D}, {0x1F1E6}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\u200D\u0308\U0001F1E6", expected: [][]rune{{0x200D, 0x0308}, {0x1F1E6}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\u200D\u0600", expected: [][]rune{{0x200D}, {0x0600}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\u200D\u0308\u0600", expected: [][]rune{{0x200D, 0x0308}, {0x0600}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\u200D\u0903", expected: [][]rune{{0x200D, 0x0903}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\u200D\u0308\u0903", expected: [][]rune{{0x200D, 0x0308, 0x0903}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) × [9.0] COMBINING DIAERESIS 
(Extend_ExtCccZwj) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\u200D\u1100", expected: [][]rune{{0x200D}, {0x1100}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\u200D\u0308\u1100", expected: [][]rune{{0x200D, 0x0308}, {0x1100}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\u200D\u1160", expected: [][]rune{{0x200D}, {0x1160}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\u200D\u0308\u1160", expected: [][]rune{{0x200D, 0x0308}, {0x1160}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\u200D\u11A8", expected: [][]rune{{0x200D}, {0x11A8}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [999.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\u200D\u0308\u11A8", expected: [][]rune{{0x200D, 0x0308}, {0x11A8}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\u200D\uAC00", expected: [][]rune{{0x200D}, {0xAC00}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\u200D\u0308\uAC00", expected: [][]rune{{0x200D, 0x0308}, {0xAC00}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\u200D\uAC01", expected: [][]rune{{0x200D}, {0xAC01}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\u200D\u0308\uAC01", expected: [][]rune{{0x200D, 0x0308}, {0xAC01}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\u200D\u231A", expected: [][]rune{{0x200D}, {0x231A}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [999.0] WATCH (ExtPict) ÷ [0.3] {original: "\u200D\u0308\u231A", expected: [][]rune{{0x200D, 0x0308}, {0x231A}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] WATCH (ExtPict) ÷ [0.3] {original: "\u200D\u0300", expected: [][]rune{{0x200D, 0x0300}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) × [9.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\u200D\u0308\u0300", expected: [][]rune{{0x200D, 0x0308, 0x0300}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\u200D\u200D", expected: [][]rune{{0x200D, 0x200D}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\u200D\u0308\u200D", expected: [][]rune{{0x200D, 0x0308, 0x200D}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\u200D\u0378", expected: [][]rune{{0x200D}, {0x0378}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [999.0] (Other) ÷ [0.3] {original: "\u200D\u0308\u0378", expected: [][]rune{{0x200D, 0x0308}, {0x0378}}}, // ÷ [0.2] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] (Other) ÷ [0.3] {original: "\u0378\u0020", expected: [][]rune{{0x0378}, {0x0020}}}, // ÷ [0.2] (Other) ÷ [999.0] SPACE (Other) ÷ [0.3] 
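// In the generated comments on these test cases, "÷" marks a position where a
// grapheme cluster boundary is expected, "×" marks a position where the two
// code points stay in the same cluster, and the bracketed numbers name the
// UAX #29 rule that decides it (e.g. [3.0] = GB3, [999.0] = GB999); [0.2] and
// [0.3] stand for the start and end of the test string.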
{original: "\u0378\u0308\u0020", expected: [][]rune{{0x0378, 0x0308}, {0x0020}}}, // ÷ [0.2] (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] SPACE (Other) ÷ [0.3] {original: "\u0378\u000D", expected: [][]rune{{0x0378}, {0x000D}}}, // ÷ [0.2] (Other) ÷ [5.0] (CR) ÷ [0.3] {original: "\u0378\u0308\u000D", expected: [][]rune{{0x0378, 0x0308}, {0x000D}}}, // ÷ [0.2] (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (CR) ÷ [0.3] {original: "\u0378\u000A", expected: [][]rune{{0x0378}, {0x000A}}}, // ÷ [0.2] (Other) ÷ [5.0] (LF) ÷ [0.3] {original: "\u0378\u0308\u000A", expected: [][]rune{{0x0378, 0x0308}, {0x000A}}}, // ÷ [0.2] (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (LF) ÷ [0.3] {original: "\u0378\u0001", expected: [][]rune{{0x0378}, {0x0001}}}, // ÷ [0.2] (Other) ÷ [5.0] (Control) ÷ [0.3] {original: "\u0378\u0308\u0001", expected: [][]rune{{0x0378, 0x0308}, {0x0001}}}, // ÷ [0.2] (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [5.0] (Control) ÷ [0.3] {original: "\u0378\u034F", expected: [][]rune{{0x0378, 0x034F}}}, // ÷ [0.2] (Other) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\u0378\u0308\u034F", expected: [][]rune{{0x0378, 0x0308, 0x034F}}}, // ÷ [0.2] (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAPHEME JOINER (Extend) ÷ [0.3] {original: "\u0378\U0001F1E6", expected: [][]rune{{0x0378}, {0x1F1E6}}}, // ÷ [0.2] (Other) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\u0378\u0308\U0001F1E6", expected: [][]rune{{0x0378, 0x0308}, {0x1F1E6}}}, // ÷ [0.2] (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] {original: "\u0378\u0600", expected: [][]rune{{0x0378}, {0x0600}}}, // ÷ [0.2] (Other) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\u0378\u0308\u0600", expected: [][]rune{{0x0378, 0x0308}, {0x0600}}}, // ÷ [0.2] (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] {original: "\u0378\u0903", expected: [][]rune{{0x0378, 0x0903}}}, // ÷ [0.2] (Other) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\u0378\u0308\u0903", expected: [][]rune{{0x0378, 0x0308, 0x0903}}}, // ÷ [0.2] (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] {original: "\u0378\u1100", expected: [][]rune{{0x0378}, {0x1100}}}, // ÷ [0.2] (Other) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\u0378\u0308\u1100", expected: [][]rune{{0x0378, 0x0308}, {0x1100}}}, // ÷ [0.2] (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\u0378\u1160", expected: [][]rune{{0x0378}, {0x1160}}}, // ÷ [0.2] (Other) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\u0378\u0308\u1160", expected: [][]rune{{0x0378, 0x0308}, {0x1160}}}, // ÷ [0.2] (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] {original: "\u0378\u11A8", expected: [][]rune{{0x0378}, {0x11A8}}}, // ÷ [0.2] (Other) ÷ [999.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\u0378\u0308\u11A8", expected: [][]rune{{0x0378, 0x0308}, {0x11A8}}}, // ÷ [0.2] (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] {original: "\u0378\uAC00", expected: [][]rune{{0x0378}, {0xAC00}}}, // ÷ [0.2] (Other) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\u0378\u0308\uAC00", expected: [][]rune{{0x0378, 
0x0308}, {0xAC00}}}, // ÷ [0.2] (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] {original: "\u0378\uAC01", expected: [][]rune{{0x0378}, {0xAC01}}}, // ÷ [0.2] (Other) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\u0378\u0308\uAC01", expected: [][]rune{{0x0378, 0x0308}, {0xAC01}}}, // ÷ [0.2] (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] {original: "\u0378\u231A", expected: [][]rune{{0x0378}, {0x231A}}}, // ÷ [0.2] (Other) ÷ [999.0] WATCH (ExtPict) ÷ [0.3] {original: "\u0378\u0308\u231A", expected: [][]rune{{0x0378, 0x0308}, {0x231A}}}, // ÷ [0.2] (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] WATCH (ExtPict) ÷ [0.3] {original: "\u0378\u0300", expected: [][]rune{{0x0378, 0x0300}}}, // ÷ [0.2] (Other) × [9.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\u0378\u0308\u0300", expected: [][]rune{{0x0378, 0x0308, 0x0300}}}, // ÷ [0.2] (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] COMBINING GRAVE ACCENT (Extend_ExtCccZwj) ÷ [0.3] {original: "\u0378\u200D", expected: [][]rune{{0x0378, 0x200D}}}, // ÷ [0.2] (Other) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\u0378\u0308\u200D", expected: [][]rune{{0x0378, 0x0308, 0x200D}}}, // ÷ [0.2] (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\u0378\u0378", expected: [][]rune{{0x0378}, {0x0378}}}, // ÷ [0.2] (Other) ÷ [999.0] (Other) ÷ [0.3] {original: "\u0378\u0308\u0378", expected: [][]rune{{0x0378, 0x0308}, {0x0378}}}, // ÷ [0.2] (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] (Other) ÷ [0.3] {original: "\u000D\u000A\u0061\u000A\u0308", expected: [][]rune{{0x000D, 0x000A}, {0x0061}, {0x000A}, {0x0308}}}, // ÷ [0.2] (CR) × [3.0] (LF) ÷ [4.0] LATIN SMALL LETTER A (Other) ÷ [5.0] (LF) ÷ [4.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [0.3] {original: "\u0061\u0308", expected: [][]rune{{0x0061, 0x0308}}}, // ÷ [0.2] LATIN SMALL LETTER A (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [0.3] {original: "\u0020\u200D\u0646", expected: [][]rune{{0x0020, 0x200D}, {0x0646}}}, // ÷ [0.2] SPACE (Other) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [999.0] ARABIC LETTER NOON (Other) ÷ [0.3] {original: "\u0646\u200D\u0020", expected: [][]rune{{0x0646, 0x200D}, {0x0020}}}, // ÷ [0.2] ARABIC LETTER NOON (Other) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [999.0] SPACE (Other) ÷ [0.3] {original: "\u1100\u1100", expected: [][]rune{{0x1100, 0x1100}}}, // ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [6.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\uAC00\u11A8\u1100", expected: [][]rune{{0xAC00, 0x11A8}, {0x1100}}}, // ÷ [0.2] HANGUL SYLLABLE GA (LV) × [7.0] HANGUL JONGSEONG KIYEOK (T) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\uAC01\u11A8\u1100", expected: [][]rune{{0xAC01, 0x11A8}, {0x1100}}}, // ÷ [0.2] HANGUL SYLLABLE GAG (LVT) × [8.0] HANGUL JONGSEONG KIYEOK (T) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] {original: "\U0001F1E6\U0001F1E7\U0001F1E8\u0062", expected: [][]rune{{0x1F1E6, 0x1F1E7}, {0x1F1E8}, {0x0062}}}, // ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [12.0] REGIONAL INDICATOR SYMBOL LETTER B (RI) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER C (RI) ÷ [999.0] LATIN SMALL LETTER B (Other) ÷ [0.3] {original: "\u0061\U0001F1E6\U0001F1E7\U0001F1E8\u0062", expected: [][]rune{{0x0061}, {0x1F1E6, 0x1F1E7}, {0x1F1E8}, {0x0062}}}, // ÷ [0.2] LATIN SMALL LETTER A (Other) ÷ [999.0] 
REGIONAL INDICATOR SYMBOL LETTER A (RI) × [13.0] REGIONAL INDICATOR SYMBOL LETTER B (RI) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER C (RI) ÷ [999.0] LATIN SMALL LETTER B (Other) ÷ [0.3] {original: "\u0061\U0001F1E6\U0001F1E7\u200D\U0001F1E8\u0062", expected: [][]rune{{0x0061}, {0x1F1E6, 0x1F1E7, 0x200D}, {0x1F1E8}, {0x0062}}}, // ÷ [0.2] LATIN SMALL LETTER A (Other) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [13.0] REGIONAL INDICATOR SYMBOL LETTER B (RI) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER C (RI) ÷ [999.0] LATIN SMALL LETTER B (Other) ÷ [0.3] {original: "\u0061\U0001F1E6\u200D\U0001F1E7\U0001F1E8\u0062", expected: [][]rune{{0x0061}, {0x1F1E6, 0x200D}, {0x1F1E7, 0x1F1E8}, {0x0062}}}, // ÷ [0.2] LATIN SMALL LETTER A (Other) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER B (RI) × [13.0] REGIONAL INDICATOR SYMBOL LETTER C (RI) ÷ [999.0] LATIN SMALL LETTER B (Other) ÷ [0.3] {original: "\u0061\U0001F1E6\U0001F1E7\U0001F1E8\U0001F1E9\u0062", expected: [][]rune{{0x0061}, {0x1F1E6, 0x1F1E7}, {0x1F1E8, 0x1F1E9}, {0x0062}}}, // ÷ [0.2] LATIN SMALL LETTER A (Other) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [13.0] REGIONAL INDICATOR SYMBOL LETTER B (RI) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER C (RI) × [13.0] REGIONAL INDICATOR SYMBOL LETTER D (RI) ÷ [999.0] LATIN SMALL LETTER B (Other) ÷ [0.3] {original: "\u0061\u200D", expected: [][]rune{{0x0061, 0x200D}}}, // ÷ [0.2] LATIN SMALL LETTER A (Other) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [0.3] {original: "\u0061\u0308\u0062", expected: [][]rune{{0x0061, 0x0308}, {0x0062}}}, // ÷ [0.2] LATIN SMALL LETTER A (Other) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) ÷ [999.0] LATIN SMALL LETTER B (Other) ÷ [0.3] {original: "\u0061\u0903\u0062", expected: [][]rune{{0x0061, 0x0903}, {0x0062}}}, // ÷ [0.2] LATIN SMALL LETTER A (Other) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [999.0] LATIN SMALL LETTER B (Other) ÷ [0.3] {original: "\u0061\u0600\u0062", expected: [][]rune{{0x0061}, {0x0600, 0x0062}}}, // ÷ [0.2] LATIN SMALL LETTER A (Other) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) × [9.2] LATIN SMALL LETTER B (Other) ÷ [0.3] {original: "\U0001F476\U0001F3FF\U0001F476", expected: [][]rune{{0x1F476, 0x1F3FF}, {0x1F476}}}, // ÷ [0.2] BABY (ExtPict) × [9.0] EMOJI MODIFIER FITZPATRICK TYPE-6 (Extend) ÷ [999.0] BABY (ExtPict) ÷ [0.3] {original: "\u0061\U0001F3FF\U0001F476", expected: [][]rune{{0x0061, 0x1F3FF}, {0x1F476}}}, // ÷ [0.2] LATIN SMALL LETTER A (Other) × [9.0] EMOJI MODIFIER FITZPATRICK TYPE-6 (Extend) ÷ [999.0] BABY (ExtPict) ÷ [0.3] {original: "\u0061\U0001F3FF\U0001F476\u200D\U0001F6D1", expected: [][]rune{{0x0061, 0x1F3FF}, {0x1F476, 0x200D, 0x1F6D1}}}, // ÷ [0.2] LATIN SMALL LETTER A (Other) × [9.0] EMOJI MODIFIER FITZPATRICK TYPE-6 (Extend) ÷ [999.0] BABY (ExtPict) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) × [11.0] OCTAGONAL SIGN (ExtPict) ÷ [0.3] {original: "\U0001F476\U0001F3FF\u0308\u200D\U0001F476\U0001F3FF", expected: [][]rune{{0x1F476, 0x1F3FF, 0x0308, 0x200D, 0x1F476, 0x1F3FF}}}, // ÷ [0.2] BABY (ExtPict) × [9.0] EMOJI MODIFIER FITZPATRICK TYPE-6 (Extend) × [9.0] COMBINING DIAERESIS (Extend_ExtCccZwj) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) × [11.0] BABY (ExtPict) × [9.0] EMOJI MODIFIER FITZPATRICK TYPE-6 (Extend) ÷ [0.3] {original: "\U0001F6D1\u200D\U0001F6D1", expected: [][]rune{{0x1F6D1, 0x200D, 0x1F6D1}}}, // ÷ [0.2] OCTAGONAL SIGN (ExtPict) × [9.0] ZERO WIDTH JOINER 
(ZWJ_ExtCccZwj) × [11.0] OCTAGONAL SIGN (ExtPict) ÷ [0.3] {original: "\u0061\u200D\U0001F6D1", expected: [][]rune{{0x0061, 0x200D}, {0x1F6D1}}}, // ÷ [0.2] LATIN SMALL LETTER A (Other) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [999.0] OCTAGONAL SIGN (ExtPict) ÷ [0.3] {original: "\u2701\u200D\u2701", expected: [][]rune{{0x2701, 0x200D, 0x2701}}}, // ÷ [0.2] UPPER BLADE SCISSORS (Other) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) × [11.0] UPPER BLADE SCISSORS (Other) ÷ [0.3] {original: "\u0061\u200D\u2701", expected: [][]rune{{0x0061, 0x200D}, {0x2701}}}, // ÷ [0.2] LATIN SMALL LETTER A (Other) × [9.0] ZERO WIDTH JOINER (ZWJ_ExtCccZwj) ÷ [999.0] UPPER BLADE SCISSORS (Other) ÷ [0.3] } // decomposed returns a grapheme cluster decomposition. func decomposed(s string) (runes [][]rune) { gr := NewGraphemes(s) for gr.Next() { runes = append(runes, gr.Runes()) } return } // Run the testCases slice above. func TestSimple(t *testing.T) { for testNum, testCase := range testCases { /*t.Logf(`Test case %d "%s": Expecting %x, getting %x, code points %x"`, testNum, strings.TrimSpace(testCase.original), testCase.expected, decomposed(testCase.original), []rune(testCase.original))*/ gr := NewGraphemes(testCase.original) var index int GraphemeLoop: for index = 0; gr.Next(); index++ { if index >= len(testCase.expected) { t.Errorf(`Test case %d "%s" failed: More grapheme clusters returned than expected %d`, testNum, testCase.original, len(testCase.expected)) break } cluster := gr.Runes() if len(cluster) != len(testCase.expected[index]) { t.Errorf(`Test case %d "%s" failed: Grapheme cluster at index %d has %d codepoints %x, %d expected %x`, testNum, testCase.original, index, len(cluster), cluster, len(testCase.expected[index]), testCase.expected[index]) break } for i, r := range cluster { if r != testCase.expected[index][i] { t.Errorf(`Test case %d "%s" failed: Grapheme cluster at index %d is %x, expected %x`, testNum, testCase.original, index, cluster, testCase.expected[index]) break GraphemeLoop } } } if index < len(testCase.expected) { t.Errorf(`Test case %d "%s" failed: Fewer grapheme clusters returned (%d) than expected (%d)`, testNum, testCase.original, index, len(testCase.expected)) } } } // Test the Str() function. func TestStr(t *testing.T) { gr := NewGraphemes("möp") gr.Next() gr.Next() gr.Next() if str := gr.Str(); str != "p" { t.Errorf(`Expected "p", got "%s"`, str) } } // Test the Bytes() function. func TestBytes(t *testing.T) { gr := NewGraphemes("A👩‍❤️‍💋‍👩B") gr.Next() gr.Next() gr.Next() b := gr.Bytes() if len(b) != 1 { t.Fatalf(`Expected len("B") == 1, got %d`, len(b)) } if b[0] != 'B' { t.Errorf(`Expected "B", got "%s"`, string(b[0])) } } // Test the Positions() function. func TestPositions(t *testing.T) { gr := NewGraphemes("A👩‍❤️‍💋‍👩B") gr.Next() gr.Next() from, to := gr.Positions() if from != 1 || to != 28 { t.Errorf(`Expected from=%d to=%d, got from=%d to=%d`, 1, 28, from, to) } } // Test the Reset() function. func TestReset(t *testing.T) { gr := NewGraphemes("möp") gr.Next() gr.Next() gr.Next() gr.Reset() gr.Next() if str := gr.Str(); str != "m" { t.Errorf(`Expected "m", got "%s"`, str) } } // Test retrieving clusters before calling Next(). 
func TestEarly(t *testing.T) { gr := NewGraphemes("test") r := gr.Runes() if r != nil { t.Errorf(`Expected nil rune slice, got %x`, r) } str := gr.Str() if str != "" { t.Errorf(`Expected empty string, got "%s"`, str) } b := gr.Bytes() if b != nil { t.Errorf(`Expected byte rune slice, got %x`, b) } from, to := gr.Positions() if from != 0 || to != 0 { t.Errorf(`Expected from=%d to=%d, got from=%d to=%d`, 0, 0, from, to) } } // Test retrieving more clusters after retrieving the last cluster. func TestLate(t *testing.T) { gr := NewGraphemes("x") gr.Next() gr.Next() r := gr.Runes() if r != nil { t.Errorf(`Expected nil rune slice, got %x`, r) } str := gr.Str() if str != "" { t.Errorf(`Expected empty string, got "%s"`, str) } b := gr.Bytes() if b != nil { t.Errorf(`Expected byte rune slice, got %x`, b) } from, to := gr.Positions() if from != 1 || to != 1 { t.Errorf(`Expected from=%d to=%d, got from=%d to=%d`, 1, 1, from, to) } } // Test the GraphemeClusterCount function. func TestCount(t *testing.T) { if n := GraphemeClusterCount("🇩🇪🏳️‍🌈"); n != 2 { t.Errorf(`Expected 2 grapheme clusters, got %d`, n) } } dependencies/pkg/mod/github.com/rivo/uniseg@v0.2.0/grapheme.go0000644000000000000000000002075415024302472023044 0ustar rootrootpackage uniseg import "unicode/utf8" // The states of the grapheme cluster parser. const ( grAny = iota grCR grControlLF grL grLVV grLVTT grPrepend grExtendedPictographic grExtendedPictographicZWJ grRIOdd grRIEven ) // The grapheme cluster parser's breaking instructions. const ( grNoBoundary = iota grBoundary ) // The grapheme cluster parser's state transitions. Maps (state, property) to // (new state, breaking instruction, rule number). The breaking instruction // always refers to the boundary between the last and next code point. // // This map is queried as follows: // // 1. Find specific state + specific property. Stop if found. // 2. Find specific state + any property. // 3. Find any state + specific property. // 4. If only (2) or (3) (but not both) was found, stop. // 5. If both (2) and (3) were found, use state and breaking instruction from // the transition with the lower rule number, prefer (3) if rule numbers // are equal. Stop. // 6. Assume grAny and grBoundary. var grTransitions = map[[2]int][3]int{ // GB5 {grAny, prCR}: {grCR, grBoundary, 50}, {grAny, prLF}: {grControlLF, grBoundary, 50}, {grAny, prControl}: {grControlLF, grBoundary, 50}, // GB4 {grCR, prAny}: {grAny, grBoundary, 40}, {grControlLF, prAny}: {grAny, grBoundary, 40}, // GB3. {grCR, prLF}: {grAny, grNoBoundary, 30}, // GB6. {grAny, prL}: {grL, grBoundary, 9990}, {grL, prL}: {grL, grNoBoundary, 60}, {grL, prV}: {grLVV, grNoBoundary, 60}, {grL, prLV}: {grLVV, grNoBoundary, 60}, {grL, prLVT}: {grLVTT, grNoBoundary, 60}, // GB7. {grAny, prLV}: {grLVV, grBoundary, 9990}, {grAny, prV}: {grLVV, grBoundary, 9990}, {grLVV, prV}: {grLVV, grNoBoundary, 70}, {grLVV, prT}: {grLVTT, grNoBoundary, 70}, // GB8. {grAny, prLVT}: {grLVTT, grBoundary, 9990}, {grAny, prT}: {grLVTT, grBoundary, 9990}, {grLVTT, prT}: {grLVTT, grNoBoundary, 80}, // GB9. {grAny, prExtend}: {grAny, grNoBoundary, 90}, {grAny, prZWJ}: {grAny, grNoBoundary, 90}, // GB9a. {grAny, prSpacingMark}: {grAny, grNoBoundary, 91}, // GB9b. {grAny, prPreprend}: {grPrepend, grBoundary, 9990}, {grPrepend, prAny}: {grAny, grNoBoundary, 92}, // GB11. 
{grAny, prExtendedPictographic}: {grExtendedPictographic, grBoundary, 9990}, {grExtendedPictographic, prExtend}: {grExtendedPictographic, grNoBoundary, 110}, {grExtendedPictographic, prZWJ}: {grExtendedPictographicZWJ, grNoBoundary, 110}, {grExtendedPictographicZWJ, prExtendedPictographic}: {grExtendedPictographic, grNoBoundary, 110}, // GB12 / GB13. {grAny, prRegionalIndicator}: {grRIOdd, grBoundary, 9990}, {grRIOdd, prRegionalIndicator}: {grRIEven, grNoBoundary, 120}, {grRIEven, prRegionalIndicator}: {grRIOdd, grBoundary, 120}, } // Graphemes implements an iterator over Unicode extended grapheme clusters, // specified in the Unicode Standard Annex #29. Grapheme clusters correspond to // "user-perceived characters". These characters often consist of multiple // code points (e.g. the "woman kissing woman" emoji consists of 8 code points: // woman + ZWJ + heavy black heart (2 code points) + ZWJ + kiss mark + ZWJ + // woman) and the rules described in Annex #29 must be applied to group those // code points into clusters perceived by the user as one character. type Graphemes struct { // The code points over which this class iterates. codePoints []rune // The (byte-based) indices of the code points into the original string plus // len(original string). Thus, len(indices) = len(codePoints) + 1. indices []int // The current grapheme cluster to be returned. These are indices into // codePoints/indices. If start == end, we either haven't started iterating // yet (0) or the iteration has already completed (1). start, end int // The index of the next code point to be parsed. pos int // The current state of the code point parser. state int } // NewGraphemes returns a new grapheme cluster iterator. func NewGraphemes(s string) *Graphemes { l := utf8.RuneCountInString(s) codePoints := make([]rune, l) indices := make([]int, l+1) i := 0 for pos, r := range s { codePoints[i] = r indices[i] = pos i++ } indices[l] = len(s) g := &Graphemes{ codePoints: codePoints, indices: indices, } g.Next() // Parse ahead. return g } // Next advances the iterator by one grapheme cluster and returns false if no // clusters are left. This function must be called before the first cluster is // accessed. func (g *Graphemes) Next() bool { g.start = g.end // The state transition gives us a boundary instruction BEFORE the next code // point so we always need to stay ahead by one code point. // Parse the next code point. for g.pos <= len(g.codePoints) { // GB2. if g.pos == len(g.codePoints) { g.end = g.pos g.pos++ break } // Determine the property of the next character. nextProperty := property(g.codePoints[g.pos]) g.pos++ // Find the applicable transition. var boundary bool transition, ok := grTransitions[[2]int{g.state, nextProperty}] if ok { // We have a specific transition. We'll use it. g.state = transition[0] boundary = transition[1] == grBoundary } else { // No specific transition found. Try the less specific ones. transAnyProp, okAnyProp := grTransitions[[2]int{g.state, prAny}] transAnyState, okAnyState := grTransitions[[2]int{grAny, nextProperty}] if okAnyProp && okAnyState { // Both apply. We'll use a mix (see comments for grTransitions). g.state = transAnyState[0] boundary = transAnyState[1] == grBoundary if transAnyProp[2] < transAnyState[2] { g.state = transAnyProp[0] boundary = transAnyProp[1] == grBoundary } } else if okAnyProp { // We only have a specific state. 
g.state = transAnyProp[0] boundary = transAnyProp[1] == grBoundary // This branch will probably never be reached because okAnyState will // always be true given the current transition map. But we keep it here // for future modifications to the transition map where this may not be // true anymore. } else if okAnyState { // We only have a specific property. g.state = transAnyState[0] boundary = transAnyState[1] == grBoundary } else { // No known transition. GB999: Any x Any. g.state = grAny boundary = true } } // If we found a cluster boundary, let's stop here. The current cluster will // be the one that just ended. if g.pos-1 == 0 /* GB1 */ || boundary { g.end = g.pos - 1 break } } return g.start != g.end } // Runes returns a slice of runes (code points) which corresponds to the current // grapheme cluster. If the iterator is already past the end or Next() has not // yet been called, nil is returned. func (g *Graphemes) Runes() []rune { if g.start == g.end { return nil } return g.codePoints[g.start:g.end] } // Str returns a substring of the original string which corresponds to the // current grapheme cluster. If the iterator is already past the end or Next() // has not yet been called, an empty string is returned. func (g *Graphemes) Str() string { if g.start == g.end { return "" } return string(g.codePoints[g.start:g.end]) } // Bytes returns a byte slice which corresponds to the current grapheme cluster. // If the iterator is already past the end or Next() has not yet been called, // nil is returned. func (g *Graphemes) Bytes() []byte { if g.start == g.end { return nil } return []byte(string(g.codePoints[g.start:g.end])) } // Positions returns the interval of the current grapheme cluster as byte // positions into the original string. The first returned value "from" indexes // the first byte and the second returned value "to" indexes the first byte that // is not included anymore, i.e. str[from:to] is the current grapheme cluster of // the original string "str". If Next() has not yet been called, both values are // 0. If the iterator is already past the end, both values are 1. func (g *Graphemes) Positions() (int, int) { return g.indices[g.start], g.indices[g.end] } // Reset puts the iterator into its initial state such that the next call to // Next() sets it to the first grapheme cluster again. func (g *Graphemes) Reset() { g.start, g.end, g.pos, g.state = 0, 0, 0, grAny g.Next() // Parse ahead again. } // GraphemeClusterCount returns the number of user-perceived characters // (grapheme clusters) for the given string. To calculate this number, it // iterates through the string using the Graphemes iterator. func GraphemeClusterCount(s string) (n int) { g := NewGraphemes(s) for g.Next() { n++ } return } dependencies/pkg/mod/github.com/rivo/uniseg@v0.2.0/go.mod0000644000000000000000000000004715024302472022024 0ustar rootrootmodule github.com/rivo/uniseg go 1.12 dependencies/pkg/mod/github.com/rivo/uniseg@v0.2.0/doc.go0000644000000000000000000000035315024302472022012 0ustar rootroot/* Package uniseg implements Unicode Text Segmentation according to Unicode Standard Annex #29 (http://unicode.org/reports/tr29/). At this point, only the determination of grapheme cluster boundaries is implemented. 
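A minimal usage sketch, relying only on the exported API defined in grapheme.go
(NewGraphemes, Next, Runes, Str, Positions, GraphemeClusterCount); the cluster
count for the flag string matches the package's own TestCount:

	package main

	import (
		"fmt"

		"github.com/rivo/uniseg"
	)

	func main() {
		// Iterate over the user-perceived characters of a string.
		gr := uniseg.NewGraphemes("Käse")
		for gr.Next() {
			from, to := gr.Positions() // byte offsets into the original string
			fmt.Printf("%q %x [%d:%d]\n", gr.Str(), gr.Runes(), from, to)
		}

		// Or just count grapheme clusters.
		fmt.Println(uniseg.GraphemeClusterCount("🇩🇪🏳️‍🌈")) // 2
	}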
*/ package uniseg dependencies/pkg/mod/github.com/rivo/uniseg@v0.2.0/README.md0000644000000000000000000000431215024302472022174 0ustar rootroot# Unicode Text Segmentation for Go [![Godoc Reference](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/rivo/uniseg) [![Go Report](https://img.shields.io/badge/go%20report-A%2B-brightgreen.svg)](https://goreportcard.com/report/github.com/rivo/uniseg) This Go package implements Unicode Text Segmentation according to [Unicode Standard Annex #29](http://unicode.org/reports/tr29/) (Unicode version 12.0.0). At this point, only the determination of grapheme cluster boundaries is implemented. ## Background In Go, [strings are read-only slices of bytes](https://blog.golang.org/strings). They can be turned into Unicode code points using the `for` loop or by casting: `[]rune(str)`. However, multiple code points may be combined into one user-perceived character or what the Unicode specification calls "grapheme cluster". Here are some examples: |String|Bytes (UTF-8)|Code points (runes)|Grapheme clusters| |-|-|-|-| |Käse|6 bytes: `4b 61 cc 88 73 65`|5 code points: `4b 61 308 73 65`|4 clusters: `[4b],[61 308],[73],[65]`| |🏳️‍🌈|14 bytes: `f0 9f 8f b3 ef b8 8f e2 80 8d f0 9f 8c 88`|4 code points: `1f3f3 fe0f 200d 1f308`|1 cluster: `[1f3f3 fe0f 200d 1f308]`| |🇩🇪|8 bytes: `f0 9f 87 a9 f0 9f 87 aa`|2 code points: `1f1e9 1f1ea`|1 cluster: `[1f1e9 1f1ea]`| This package provides a tool to iterate over these grapheme clusters. This may be used to determine the number of user-perceived characters, to split strings in their intended places, or to extract individual characters which form a unit. ## Installation ```bash go get github.com/rivo/uniseg ``` ## Basic Example ```go package uniseg import ( "fmt" "github.com/rivo/uniseg" ) func main() { gr := uniseg.NewGraphemes("👍🏼!") for gr.Next() { fmt.Printf("%x ", gr.Runes()) } // Output: [1f44d 1f3fc] [21] } ``` ## Documentation Refer to https://godoc.org/github.com/rivo/uniseg for the package's documentation. ## Dependencies This package does not depend on any packages outside the standard library. ## Your Feedback Add your issue here on GitHub. Feel free to get in touch if you have any questions. ## Version Version tags will be introduced once Golang modules are official. Consider this version 0.1. dependencies/pkg/mod/github.com/rivo/uniseg@v0.2.0/properties.go0000644000000000000000000045031315024302472023446 0ustar rootrootpackage uniseg // The unicode properties. Only the ones needed in the context of this package // are included. const ( prAny = iota prPreprend prCR prLF prControl prExtend prRegionalIndicator prSpacingMark prL prV prT prLV prLVT prZWJ prExtendedPictographic ) // Maps code point ranges to their properties. In the context of this package, // any code point that is not contained may map to "prAny". The code point // ranges in this slice are numerically sorted. // // These ranges were taken from // http://www.unicode.org/Public/UCD/latest/ucd/auxiliary/GraphemeBreakProperty.txt // as well as // https://unicode.org/Public/emoji/latest/emoji-data.txt // ("Extended_Pictographic" only) on March 11, 2019. See // https://www.unicode.org/license.html for the Unicode license agreement. var codePoints = [][3]int{ {0x0000, 0x0009, prControl}, // Cc [10] .. {0x000A, 0x000A, prLF}, // Cc {0x000B, 0x000C, prControl}, // Cc [2] .. {0x000D, 0x000D, prCR}, // Cc {0x000E, 0x001F, prControl}, // Cc [18] .. {0x007F, 0x009F, prControl}, // Cc [33] .. 
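	// Because the ranges in this slice are numerically sorted, a property can be
	// found with a binary search over it. The following is only an illustrative
	// sketch (lookupProperty is a hypothetical name; the package's own property()
	// helper, called from grapheme.go, is not shown above):
	//
	//	func lookupProperty(r rune) int {
	//		lo, hi := 0, len(codePoints)-1
	//		for lo <= hi {
	//			mid := (lo + hi) / 2
	//			cp := codePoints[mid]
	//			switch {
	//			case int(r) < cp[0]:
	//				hi = mid - 1 // search the lower half
	//			case int(r) > cp[1]:
	//				lo = mid + 1 // search the upper half
	//			default:
	//				return cp[2] // r lies within [cp[0], cp[1]]
	//			}
	//		}
	//		return prAny // unlisted code points default to prAny
	//	}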
{0x00A9, 0x00A9, prExtendedPictographic}, // 1.1 [1] (©️) copyright {0x00AD, 0x00AD, prControl}, // Cf SOFT HYPHEN {0x00AE, 0x00AE, prExtendedPictographic}, // 1.1 [1] (®️) registered {0x0300, 0x036F, prExtend}, // Mn [112] COMBINING GRAVE ACCENT..COMBINING LATIN SMALL LETTER X {0x0483, 0x0487, prExtend}, // Mn [5] COMBINING CYRILLIC TITLO..COMBINING CYRILLIC POKRYTIE {0x0488, 0x0489, prExtend}, // Me [2] COMBINING CYRILLIC HUNDRED THOUSANDS SIGN..COMBINING CYRILLIC MILLIONS SIGN {0x0591, 0x05BD, prExtend}, // Mn [45] HEBREW ACCENT ETNAHTA..HEBREW POINT METEG {0x05BF, 0x05BF, prExtend}, // Mn HEBREW POINT RAFE {0x05C1, 0x05C2, prExtend}, // Mn [2] HEBREW POINT SHIN DOT..HEBREW POINT SIN DOT {0x05C4, 0x05C5, prExtend}, // Mn [2] HEBREW MARK UPPER DOT..HEBREW MARK LOWER DOT {0x05C7, 0x05C7, prExtend}, // Mn HEBREW POINT QAMATS QATAN {0x0600, 0x0605, prPreprend}, // Cf [6] ARABIC NUMBER SIGN..ARABIC NUMBER MARK ABOVE {0x0610, 0x061A, prExtend}, // Mn [11] ARABIC SIGN SALLALLAHOU ALAYHE WASSALLAM..ARABIC SMALL KASRA {0x061C, 0x061C, prControl}, // Cf ARABIC LETTER MARK {0x064B, 0x065F, prExtend}, // Mn [21] ARABIC FATHATAN..ARABIC WAVY HAMZA BELOW {0x0670, 0x0670, prExtend}, // Mn ARABIC LETTER SUPERSCRIPT ALEF {0x06D6, 0x06DC, prExtend}, // Mn [7] ARABIC SMALL HIGH LIGATURE SAD WITH LAM WITH ALEF MAKSURA..ARABIC SMALL HIGH SEEN {0x06DD, 0x06DD, prPreprend}, // Cf ARABIC END OF AYAH {0x06DF, 0x06E4, prExtend}, // Mn [6] ARABIC SMALL HIGH ROUNDED ZERO..ARABIC SMALL HIGH MADDA {0x06E7, 0x06E8, prExtend}, // Mn [2] ARABIC SMALL HIGH YEH..ARABIC SMALL HIGH NOON {0x06EA, 0x06ED, prExtend}, // Mn [4] ARABIC EMPTY CENTRE LOW STOP..ARABIC SMALL LOW MEEM {0x070F, 0x070F, prPreprend}, // Cf SYRIAC ABBREVIATION MARK {0x0711, 0x0711, prExtend}, // Mn SYRIAC LETTER SUPERSCRIPT ALAPH {0x0730, 0x074A, prExtend}, // Mn [27] SYRIAC PTHAHA ABOVE..SYRIAC BARREKH {0x07A6, 0x07B0, prExtend}, // Mn [11] THAANA ABAFILI..THAANA SUKUN {0x07EB, 0x07F3, prExtend}, // Mn [9] NKO COMBINING SHORT HIGH TONE..NKO COMBINING DOUBLE DOT ABOVE {0x07FD, 0x07FD, prExtend}, // Mn NKO DANTAYALAN {0x0816, 0x0819, prExtend}, // Mn [4] SAMARITAN MARK IN..SAMARITAN MARK DAGESH {0x081B, 0x0823, prExtend}, // Mn [9] SAMARITAN MARK EPENTHETIC YUT..SAMARITAN VOWEL SIGN A {0x0825, 0x0827, prExtend}, // Mn [3] SAMARITAN VOWEL SIGN SHORT A..SAMARITAN VOWEL SIGN U {0x0829, 0x082D, prExtend}, // Mn [5] SAMARITAN VOWEL SIGN LONG I..SAMARITAN MARK NEQUDAA {0x0859, 0x085B, prExtend}, // Mn [3] MANDAIC AFFRICATION MARK..MANDAIC GEMINATION MARK {0x08D3, 0x08E1, prExtend}, // Mn [15] ARABIC SMALL LOW WAW..ARABIC SMALL HIGH SIGN SAFHA {0x08E2, 0x08E2, prPreprend}, // Cf ARABIC DISPUTED END OF AYAH {0x08E3, 0x0902, prExtend}, // Mn [32] ARABIC TURNED DAMMA BELOW..DEVANAGARI SIGN ANUSVARA {0x0903, 0x0903, prSpacingMark}, // Mc DEVANAGARI SIGN VISARGA {0x093A, 0x093A, prExtend}, // Mn DEVANAGARI VOWEL SIGN OE {0x093B, 0x093B, prSpacingMark}, // Mc DEVANAGARI VOWEL SIGN OOE {0x093C, 0x093C, prExtend}, // Mn DEVANAGARI SIGN NUKTA {0x093E, 0x0940, prSpacingMark}, // Mc [3] DEVANAGARI VOWEL SIGN AA..DEVANAGARI VOWEL SIGN II {0x0941, 0x0948, prExtend}, // Mn [8] DEVANAGARI VOWEL SIGN U..DEVANAGARI VOWEL SIGN AI {0x0949, 0x094C, prSpacingMark}, // Mc [4] DEVANAGARI VOWEL SIGN CANDRA O..DEVANAGARI VOWEL SIGN AU {0x094D, 0x094D, prExtend}, // Mn DEVANAGARI SIGN VIRAMA {0x094E, 0x094F, prSpacingMark}, // Mc [2] DEVANAGARI VOWEL SIGN PRISHTHAMATRA E..DEVANAGARI VOWEL SIGN AW {0x0951, 0x0957, prExtend}, // Mn [7] DEVANAGARI STRESS SIGN UDATTA..DEVANAGARI VOWEL SIGN 
UUE {0x0962, 0x0963, prExtend}, // Mn [2] DEVANAGARI VOWEL SIGN VOCALIC L..DEVANAGARI VOWEL SIGN VOCALIC LL {0x0981, 0x0981, prExtend}, // Mn BENGALI SIGN CANDRABINDU {0x0982, 0x0983, prSpacingMark}, // Mc [2] BENGALI SIGN ANUSVARA..BENGALI SIGN VISARGA {0x09BC, 0x09BC, prExtend}, // Mn BENGALI SIGN NUKTA {0x09BE, 0x09BE, prExtend}, // Mc BENGALI VOWEL SIGN AA {0x09BF, 0x09C0, prSpacingMark}, // Mc [2] BENGALI VOWEL SIGN I..BENGALI VOWEL SIGN II {0x09C1, 0x09C4, prExtend}, // Mn [4] BENGALI VOWEL SIGN U..BENGALI VOWEL SIGN VOCALIC RR {0x09C7, 0x09C8, prSpacingMark}, // Mc [2] BENGALI VOWEL SIGN E..BENGALI VOWEL SIGN AI {0x09CB, 0x09CC, prSpacingMark}, // Mc [2] BENGALI VOWEL SIGN O..BENGALI VOWEL SIGN AU {0x09CD, 0x09CD, prExtend}, // Mn BENGALI SIGN VIRAMA {0x09D7, 0x09D7, prExtend}, // Mc BENGALI AU LENGTH MARK {0x09E2, 0x09E3, prExtend}, // Mn [2] BENGALI VOWEL SIGN VOCALIC L..BENGALI VOWEL SIGN VOCALIC LL {0x09FE, 0x09FE, prExtend}, // Mn BENGALI SANDHI MARK {0x0A01, 0x0A02, prExtend}, // Mn [2] GURMUKHI SIGN ADAK BINDI..GURMUKHI SIGN BINDI {0x0A03, 0x0A03, prSpacingMark}, // Mc GURMUKHI SIGN VISARGA {0x0A3C, 0x0A3C, prExtend}, // Mn GURMUKHI SIGN NUKTA {0x0A3E, 0x0A40, prSpacingMark}, // Mc [3] GURMUKHI VOWEL SIGN AA..GURMUKHI VOWEL SIGN II {0x0A41, 0x0A42, prExtend}, // Mn [2] GURMUKHI VOWEL SIGN U..GURMUKHI VOWEL SIGN UU {0x0A47, 0x0A48, prExtend}, // Mn [2] GURMUKHI VOWEL SIGN EE..GURMUKHI VOWEL SIGN AI {0x0A4B, 0x0A4D, prExtend}, // Mn [3] GURMUKHI VOWEL SIGN OO..GURMUKHI SIGN VIRAMA {0x0A51, 0x0A51, prExtend}, // Mn GURMUKHI SIGN UDAAT {0x0A70, 0x0A71, prExtend}, // Mn [2] GURMUKHI TIPPI..GURMUKHI ADDAK {0x0A75, 0x0A75, prExtend}, // Mn GURMUKHI SIGN YAKASH {0x0A81, 0x0A82, prExtend}, // Mn [2] GUJARATI SIGN CANDRABINDU..GUJARATI SIGN ANUSVARA {0x0A83, 0x0A83, prSpacingMark}, // Mc GUJARATI SIGN VISARGA {0x0ABC, 0x0ABC, prExtend}, // Mn GUJARATI SIGN NUKTA {0x0ABE, 0x0AC0, prSpacingMark}, // Mc [3] GUJARATI VOWEL SIGN AA..GUJARATI VOWEL SIGN II {0x0AC1, 0x0AC5, prExtend}, // Mn [5] GUJARATI VOWEL SIGN U..GUJARATI VOWEL SIGN CANDRA E {0x0AC7, 0x0AC8, prExtend}, // Mn [2] GUJARATI VOWEL SIGN E..GUJARATI VOWEL SIGN AI {0x0AC9, 0x0AC9, prSpacingMark}, // Mc GUJARATI VOWEL SIGN CANDRA O {0x0ACB, 0x0ACC, prSpacingMark}, // Mc [2] GUJARATI VOWEL SIGN O..GUJARATI VOWEL SIGN AU {0x0ACD, 0x0ACD, prExtend}, // Mn GUJARATI SIGN VIRAMA {0x0AE2, 0x0AE3, prExtend}, // Mn [2] GUJARATI VOWEL SIGN VOCALIC L..GUJARATI VOWEL SIGN VOCALIC LL {0x0AFA, 0x0AFF, prExtend}, // Mn [6] GUJARATI SIGN SUKUN..GUJARATI SIGN TWO-CIRCLE NUKTA ABOVE {0x0B01, 0x0B01, prExtend}, // Mn ORIYA SIGN CANDRABINDU {0x0B02, 0x0B03, prSpacingMark}, // Mc [2] ORIYA SIGN ANUSVARA..ORIYA SIGN VISARGA {0x0B3C, 0x0B3C, prExtend}, // Mn ORIYA SIGN NUKTA {0x0B3E, 0x0B3E, prExtend}, // Mc ORIYA VOWEL SIGN AA {0x0B3F, 0x0B3F, prExtend}, // Mn ORIYA VOWEL SIGN I {0x0B40, 0x0B40, prSpacingMark}, // Mc ORIYA VOWEL SIGN II {0x0B41, 0x0B44, prExtend}, // Mn [4] ORIYA VOWEL SIGN U..ORIYA VOWEL SIGN VOCALIC RR {0x0B47, 0x0B48, prSpacingMark}, // Mc [2] ORIYA VOWEL SIGN E..ORIYA VOWEL SIGN AI {0x0B4B, 0x0B4C, prSpacingMark}, // Mc [2] ORIYA VOWEL SIGN O..ORIYA VOWEL SIGN AU {0x0B4D, 0x0B4D, prExtend}, // Mn ORIYA SIGN VIRAMA {0x0B56, 0x0B56, prExtend}, // Mn ORIYA AI LENGTH MARK {0x0B57, 0x0B57, prExtend}, // Mc ORIYA AU LENGTH MARK {0x0B62, 0x0B63, prExtend}, // Mn [2] ORIYA VOWEL SIGN VOCALIC L..ORIYA VOWEL SIGN VOCALIC LL {0x0B82, 0x0B82, prExtend}, // Mn TAMIL SIGN ANUSVARA {0x0BBE, 0x0BBE, prExtend}, // Mc TAMIL VOWEL SIGN AA {0x0BBF, 
0x0BBF, prSpacingMark}, // Mc TAMIL VOWEL SIGN I {0x0BC0, 0x0BC0, prExtend}, // Mn TAMIL VOWEL SIGN II {0x0BC1, 0x0BC2, prSpacingMark}, // Mc [2] TAMIL VOWEL SIGN U..TAMIL VOWEL SIGN UU {0x0BC6, 0x0BC8, prSpacingMark}, // Mc [3] TAMIL VOWEL SIGN E..TAMIL VOWEL SIGN AI {0x0BCA, 0x0BCC, prSpacingMark}, // Mc [3] TAMIL VOWEL SIGN O..TAMIL VOWEL SIGN AU {0x0BCD, 0x0BCD, prExtend}, // Mn TAMIL SIGN VIRAMA {0x0BD7, 0x0BD7, prExtend}, // Mc TAMIL AU LENGTH MARK {0x0C00, 0x0C00, prExtend}, // Mn TELUGU SIGN COMBINING CANDRABINDU ABOVE {0x0C01, 0x0C03, prSpacingMark}, // Mc [3] TELUGU SIGN CANDRABINDU..TELUGU SIGN VISARGA {0x0C04, 0x0C04, prExtend}, // Mn TELUGU SIGN COMBINING ANUSVARA ABOVE {0x0C3E, 0x0C40, prExtend}, // Mn [3] TELUGU VOWEL SIGN AA..TELUGU VOWEL SIGN II {0x0C41, 0x0C44, prSpacingMark}, // Mc [4] TELUGU VOWEL SIGN U..TELUGU VOWEL SIGN VOCALIC RR {0x0C46, 0x0C48, prExtend}, // Mn [3] TELUGU VOWEL SIGN E..TELUGU VOWEL SIGN AI {0x0C4A, 0x0C4D, prExtend}, // Mn [4] TELUGU VOWEL SIGN O..TELUGU SIGN VIRAMA {0x0C55, 0x0C56, prExtend}, // Mn [2] TELUGU LENGTH MARK..TELUGU AI LENGTH MARK {0x0C62, 0x0C63, prExtend}, // Mn [2] TELUGU VOWEL SIGN VOCALIC L..TELUGU VOWEL SIGN VOCALIC LL {0x0C81, 0x0C81, prExtend}, // Mn KANNADA SIGN CANDRABINDU {0x0C82, 0x0C83, prSpacingMark}, // Mc [2] KANNADA SIGN ANUSVARA..KANNADA SIGN VISARGA {0x0CBC, 0x0CBC, prExtend}, // Mn KANNADA SIGN NUKTA {0x0CBE, 0x0CBE, prSpacingMark}, // Mc KANNADA VOWEL SIGN AA {0x0CBF, 0x0CBF, prExtend}, // Mn KANNADA VOWEL SIGN I {0x0CC0, 0x0CC1, prSpacingMark}, // Mc [2] KANNADA VOWEL SIGN II..KANNADA VOWEL SIGN U {0x0CC2, 0x0CC2, prExtend}, // Mc KANNADA VOWEL SIGN UU {0x0CC3, 0x0CC4, prSpacingMark}, // Mc [2] KANNADA VOWEL SIGN VOCALIC R..KANNADA VOWEL SIGN VOCALIC RR {0x0CC6, 0x0CC6, prExtend}, // Mn KANNADA VOWEL SIGN E {0x0CC7, 0x0CC8, prSpacingMark}, // Mc [2] KANNADA VOWEL SIGN EE..KANNADA VOWEL SIGN AI {0x0CCA, 0x0CCB, prSpacingMark}, // Mc [2] KANNADA VOWEL SIGN O..KANNADA VOWEL SIGN OO {0x0CCC, 0x0CCD, prExtend}, // Mn [2] KANNADA VOWEL SIGN AU..KANNADA SIGN VIRAMA {0x0CD5, 0x0CD6, prExtend}, // Mc [2] KANNADA LENGTH MARK..KANNADA AI LENGTH MARK {0x0CE2, 0x0CE3, prExtend}, // Mn [2] KANNADA VOWEL SIGN VOCALIC L..KANNADA VOWEL SIGN VOCALIC LL {0x0D00, 0x0D01, prExtend}, // Mn [2] MALAYALAM SIGN COMBINING ANUSVARA ABOVE..MALAYALAM SIGN CANDRABINDU {0x0D02, 0x0D03, prSpacingMark}, // Mc [2] MALAYALAM SIGN ANUSVARA..MALAYALAM SIGN VISARGA {0x0D3B, 0x0D3C, prExtend}, // Mn [2] MALAYALAM SIGN VERTICAL BAR VIRAMA..MALAYALAM SIGN CIRCULAR VIRAMA {0x0D3E, 0x0D3E, prExtend}, // Mc MALAYALAM VOWEL SIGN AA {0x0D3F, 0x0D40, prSpacingMark}, // Mc [2] MALAYALAM VOWEL SIGN I..MALAYALAM VOWEL SIGN II {0x0D41, 0x0D44, prExtend}, // Mn [4] MALAYALAM VOWEL SIGN U..MALAYALAM VOWEL SIGN VOCALIC RR {0x0D46, 0x0D48, prSpacingMark}, // Mc [3] MALAYALAM VOWEL SIGN E..MALAYALAM VOWEL SIGN AI {0x0D4A, 0x0D4C, prSpacingMark}, // Mc [3] MALAYALAM VOWEL SIGN O..MALAYALAM VOWEL SIGN AU {0x0D4D, 0x0D4D, prExtend}, // Mn MALAYALAM SIGN VIRAMA {0x0D4E, 0x0D4E, prPreprend}, // Lo MALAYALAM LETTER DOT REPH {0x0D57, 0x0D57, prExtend}, // Mc MALAYALAM AU LENGTH MARK {0x0D62, 0x0D63, prExtend}, // Mn [2] MALAYALAM VOWEL SIGN VOCALIC L..MALAYALAM VOWEL SIGN VOCALIC LL {0x0D82, 0x0D83, prSpacingMark}, // Mc [2] SINHALA SIGN ANUSVARAYA..SINHALA SIGN VISARGAYA {0x0DCA, 0x0DCA, prExtend}, // Mn SINHALA SIGN AL-LAKUNA {0x0DCF, 0x0DCF, prExtend}, // Mc SINHALA VOWEL SIGN AELA-PILLA {0x0DD0, 0x0DD1, prSpacingMark}, // Mc [2] SINHALA VOWEL SIGN KETTI 
AEDA-PILLA..SINHALA VOWEL SIGN DIGA AEDA-PILLA {0x0DD2, 0x0DD4, prExtend}, // Mn [3] SINHALA VOWEL SIGN KETTI IS-PILLA..SINHALA VOWEL SIGN KETTI PAA-PILLA {0x0DD6, 0x0DD6, prExtend}, // Mn SINHALA VOWEL SIGN DIGA PAA-PILLA {0x0DD8, 0x0DDE, prSpacingMark}, // Mc [7] SINHALA VOWEL SIGN GAETTA-PILLA..SINHALA VOWEL SIGN KOMBUVA HAA GAYANUKITTA {0x0DDF, 0x0DDF, prExtend}, // Mc SINHALA VOWEL SIGN GAYANUKITTA {0x0DF2, 0x0DF3, prSpacingMark}, // Mc [2] SINHALA VOWEL SIGN DIGA GAETTA-PILLA..SINHALA VOWEL SIGN DIGA GAYANUKITTA {0x0E31, 0x0E31, prExtend}, // Mn THAI CHARACTER MAI HAN-AKAT {0x0E33, 0x0E33, prSpacingMark}, // Lo THAI CHARACTER SARA AM {0x0E34, 0x0E3A, prExtend}, // Mn [7] THAI CHARACTER SARA I..THAI CHARACTER PHINTHU {0x0E47, 0x0E4E, prExtend}, // Mn [8] THAI CHARACTER MAITAIKHU..THAI CHARACTER YAMAKKAN {0x0EB1, 0x0EB1, prExtend}, // Mn LAO VOWEL SIGN MAI KAN {0x0EB3, 0x0EB3, prSpacingMark}, // Lo LAO VOWEL SIGN AM {0x0EB4, 0x0EBC, prExtend}, // Mn [9] LAO VOWEL SIGN I..LAO SEMIVOWEL SIGN LO {0x0EC8, 0x0ECD, prExtend}, // Mn [6] LAO TONE MAI EK..LAO NIGGAHITA {0x0F18, 0x0F19, prExtend}, // Mn [2] TIBETAN ASTROLOGICAL SIGN -KHYUD PA..TIBETAN ASTROLOGICAL SIGN SDONG TSHUGS {0x0F35, 0x0F35, prExtend}, // Mn TIBETAN MARK NGAS BZUNG NYI ZLA {0x0F37, 0x0F37, prExtend}, // Mn TIBETAN MARK NGAS BZUNG SGOR RTAGS {0x0F39, 0x0F39, prExtend}, // Mn TIBETAN MARK TSA -PHRU {0x0F3E, 0x0F3F, prSpacingMark}, // Mc [2] TIBETAN SIGN YAR TSHES..TIBETAN SIGN MAR TSHES {0x0F71, 0x0F7E, prExtend}, // Mn [14] TIBETAN VOWEL SIGN AA..TIBETAN SIGN RJES SU NGA RO {0x0F7F, 0x0F7F, prSpacingMark}, // Mc TIBETAN SIGN RNAM BCAD {0x0F80, 0x0F84, prExtend}, // Mn [5] TIBETAN VOWEL SIGN REVERSED I..TIBETAN MARK HALANTA {0x0F86, 0x0F87, prExtend}, // Mn [2] TIBETAN SIGN LCI RTAGS..TIBETAN SIGN YANG RTAGS {0x0F8D, 0x0F97, prExtend}, // Mn [11] TIBETAN SUBJOINED SIGN LCE TSA CAN..TIBETAN SUBJOINED LETTER JA {0x0F99, 0x0FBC, prExtend}, // Mn [36] TIBETAN SUBJOINED LETTER NYA..TIBETAN SUBJOINED LETTER FIXED-FORM RA {0x0FC6, 0x0FC6, prExtend}, // Mn TIBETAN SYMBOL PADMA GDAN {0x102D, 0x1030, prExtend}, // Mn [4] MYANMAR VOWEL SIGN I..MYANMAR VOWEL SIGN UU {0x1031, 0x1031, prSpacingMark}, // Mc MYANMAR VOWEL SIGN E {0x1032, 0x1037, prExtend}, // Mn [6] MYANMAR VOWEL SIGN AI..MYANMAR SIGN DOT BELOW {0x1039, 0x103A, prExtend}, // Mn [2] MYANMAR SIGN VIRAMA..MYANMAR SIGN ASAT {0x103B, 0x103C, prSpacingMark}, // Mc [2] MYANMAR CONSONANT SIGN MEDIAL YA..MYANMAR CONSONANT SIGN MEDIAL RA {0x103D, 0x103E, prExtend}, // Mn [2] MYANMAR CONSONANT SIGN MEDIAL WA..MYANMAR CONSONANT SIGN MEDIAL HA {0x1056, 0x1057, prSpacingMark}, // Mc [2] MYANMAR VOWEL SIGN VOCALIC R..MYANMAR VOWEL SIGN VOCALIC RR {0x1058, 0x1059, prExtend}, // Mn [2] MYANMAR VOWEL SIGN VOCALIC L..MYANMAR VOWEL SIGN VOCALIC LL {0x105E, 0x1060, prExtend}, // Mn [3] MYANMAR CONSONANT SIGN MON MEDIAL NA..MYANMAR CONSONANT SIGN MON MEDIAL LA {0x1071, 0x1074, prExtend}, // Mn [4] MYANMAR VOWEL SIGN GEBA KAREN I..MYANMAR VOWEL SIGN KAYAH EE {0x1082, 0x1082, prExtend}, // Mn MYANMAR CONSONANT SIGN SHAN MEDIAL WA {0x1084, 0x1084, prSpacingMark}, // Mc MYANMAR VOWEL SIGN SHAN E {0x1085, 0x1086, prExtend}, // Mn [2] MYANMAR VOWEL SIGN SHAN E ABOVE..MYANMAR VOWEL SIGN SHAN FINAL Y {0x108D, 0x108D, prExtend}, // Mn MYANMAR SIGN SHAN COUNCIL EMPHATIC TONE {0x109D, 0x109D, prExtend}, // Mn MYANMAR VOWEL SIGN AITON AI {0x1100, 0x115F, prL}, // Lo [96] HANGUL CHOSEONG KIYEOK..HANGUL CHOSEONG FILLER {0x1160, 0x11A7, prV}, // Lo [72] HANGUL JUNGSEONG FILLER..HANGUL JUNGSEONG O-YAE {0x11A8, 
0x11FF, prT}, // Lo [88] HANGUL JONGSEONG KIYEOK..HANGUL JONGSEONG SSANGNIEUN {0x135D, 0x135F, prExtend}, // Mn [3] ETHIOPIC COMBINING GEMINATION AND VOWEL LENGTH MARK..ETHIOPIC COMBINING GEMINATION MARK {0x1712, 0x1714, prExtend}, // Mn [3] TAGALOG VOWEL SIGN I..TAGALOG SIGN VIRAMA {0x1732, 0x1734, prExtend}, // Mn [3] HANUNOO VOWEL SIGN I..HANUNOO SIGN PAMUDPOD {0x1752, 0x1753, prExtend}, // Mn [2] BUHID VOWEL SIGN I..BUHID VOWEL SIGN U {0x1772, 0x1773, prExtend}, // Mn [2] TAGBANWA VOWEL SIGN I..TAGBANWA VOWEL SIGN U {0x17B4, 0x17B5, prExtend}, // Mn [2] KHMER VOWEL INHERENT AQ..KHMER VOWEL INHERENT AA {0x17B6, 0x17B6, prSpacingMark}, // Mc KHMER VOWEL SIGN AA {0x17B7, 0x17BD, prExtend}, // Mn [7] KHMER VOWEL SIGN I..KHMER VOWEL SIGN UA {0x17BE, 0x17C5, prSpacingMark}, // Mc [8] KHMER VOWEL SIGN OE..KHMER VOWEL SIGN AU {0x17C6, 0x17C6, prExtend}, // Mn KHMER SIGN NIKAHIT {0x17C7, 0x17C8, prSpacingMark}, // Mc [2] KHMER SIGN REAHMUK..KHMER SIGN YUUKALEAPINTU {0x17C9, 0x17D3, prExtend}, // Mn [11] KHMER SIGN MUUSIKATOAN..KHMER SIGN BATHAMASAT {0x17DD, 0x17DD, prExtend}, // Mn KHMER SIGN ATTHACAN {0x180B, 0x180D, prExtend}, // Mn [3] MONGOLIAN FREE VARIATION SELECTOR ONE..MONGOLIAN FREE VARIATION SELECTOR THREE {0x180E, 0x180E, prControl}, // Cf MONGOLIAN VOWEL SEPARATOR {0x1885, 0x1886, prExtend}, // Mn [2] MONGOLIAN LETTER ALI GALI BALUDA..MONGOLIAN LETTER ALI GALI THREE BALUDA {0x18A9, 0x18A9, prExtend}, // Mn MONGOLIAN LETTER ALI GALI DAGALGA {0x1920, 0x1922, prExtend}, // Mn [3] LIMBU VOWEL SIGN A..LIMBU VOWEL SIGN U {0x1923, 0x1926, prSpacingMark}, // Mc [4] LIMBU VOWEL SIGN EE..LIMBU VOWEL SIGN AU {0x1927, 0x1928, prExtend}, // Mn [2] LIMBU VOWEL SIGN E..LIMBU VOWEL SIGN O {0x1929, 0x192B, prSpacingMark}, // Mc [3] LIMBU SUBJOINED LETTER YA..LIMBU SUBJOINED LETTER WA {0x1930, 0x1931, prSpacingMark}, // Mc [2] LIMBU SMALL LETTER KA..LIMBU SMALL LETTER NGA {0x1932, 0x1932, prExtend}, // Mn LIMBU SMALL LETTER ANUSVARA {0x1933, 0x1938, prSpacingMark}, // Mc [6] LIMBU SMALL LETTER TA..LIMBU SMALL LETTER LA {0x1939, 0x193B, prExtend}, // Mn [3] LIMBU SIGN MUKPHRENG..LIMBU SIGN SA-I {0x1A17, 0x1A18, prExtend}, // Mn [2] BUGINESE VOWEL SIGN I..BUGINESE VOWEL SIGN U {0x1A19, 0x1A1A, prSpacingMark}, // Mc [2] BUGINESE VOWEL SIGN E..BUGINESE VOWEL SIGN O {0x1A1B, 0x1A1B, prExtend}, // Mn BUGINESE VOWEL SIGN AE {0x1A55, 0x1A55, prSpacingMark}, // Mc TAI THAM CONSONANT SIGN MEDIAL RA {0x1A56, 0x1A56, prExtend}, // Mn TAI THAM CONSONANT SIGN MEDIAL LA {0x1A57, 0x1A57, prSpacingMark}, // Mc TAI THAM CONSONANT SIGN LA TANG LAI {0x1A58, 0x1A5E, prExtend}, // Mn [7] TAI THAM SIGN MAI KANG LAI..TAI THAM CONSONANT SIGN SA {0x1A60, 0x1A60, prExtend}, // Mn TAI THAM SIGN SAKOT {0x1A62, 0x1A62, prExtend}, // Mn TAI THAM VOWEL SIGN MAI SAT {0x1A65, 0x1A6C, prExtend}, // Mn [8] TAI THAM VOWEL SIGN I..TAI THAM VOWEL SIGN OA BELOW {0x1A6D, 0x1A72, prSpacingMark}, // Mc [6] TAI THAM VOWEL SIGN OY..TAI THAM VOWEL SIGN THAM AI {0x1A73, 0x1A7C, prExtend}, // Mn [10] TAI THAM VOWEL SIGN OA ABOVE..TAI THAM SIGN KHUEN-LUE KARAN {0x1A7F, 0x1A7F, prExtend}, // Mn TAI THAM COMBINING CRYPTOGRAMMIC DOT {0x1AB0, 0x1ABD, prExtend}, // Mn [14] COMBINING DOUBLED CIRCUMFLEX ACCENT..COMBINING PARENTHESES BELOW {0x1ABE, 0x1ABE, prExtend}, // Me COMBINING PARENTHESES OVERLAY {0x1B00, 0x1B03, prExtend}, // Mn [4] BALINESE SIGN ULU RICEM..BALINESE SIGN SURANG {0x1B04, 0x1B04, prSpacingMark}, // Mc BALINESE SIGN BISAH {0x1B34, 0x1B34, prExtend}, // Mn BALINESE SIGN REREKAN {0x1B35, 0x1B35, prExtend}, // Mc BALINESE VOWEL SIGN 
TEDUNG {0x1B36, 0x1B3A, prExtend}, // Mn [5] BALINESE VOWEL SIGN ULU..BALINESE VOWEL SIGN RA REPA {0x1B3B, 0x1B3B, prSpacingMark}, // Mc BALINESE VOWEL SIGN RA REPA TEDUNG {0x1B3C, 0x1B3C, prExtend}, // Mn BALINESE VOWEL SIGN LA LENGA {0x1B3D, 0x1B41, prSpacingMark}, // Mc [5] BALINESE VOWEL SIGN LA LENGA TEDUNG..BALINESE VOWEL SIGN TALING REPA TEDUNG {0x1B42, 0x1B42, prExtend}, // Mn BALINESE VOWEL SIGN PEPET {0x1B43, 0x1B44, prSpacingMark}, // Mc [2] BALINESE VOWEL SIGN PEPET TEDUNG..BALINESE ADEG ADEG {0x1B6B, 0x1B73, prExtend}, // Mn [9] BALINESE MUSICAL SYMBOL COMBINING TEGEH..BALINESE MUSICAL SYMBOL COMBINING GONG {0x1B80, 0x1B81, prExtend}, // Mn [2] SUNDANESE SIGN PANYECEK..SUNDANESE SIGN PANGLAYAR {0x1B82, 0x1B82, prSpacingMark}, // Mc SUNDANESE SIGN PANGWISAD {0x1BA1, 0x1BA1, prSpacingMark}, // Mc SUNDANESE CONSONANT SIGN PAMINGKAL {0x1BA2, 0x1BA5, prExtend}, // Mn [4] SUNDANESE CONSONANT SIGN PANYAKRA..SUNDANESE VOWEL SIGN PANYUKU {0x1BA6, 0x1BA7, prSpacingMark}, // Mc [2] SUNDANESE VOWEL SIGN PANAELAENG..SUNDANESE VOWEL SIGN PANOLONG {0x1BA8, 0x1BA9, prExtend}, // Mn [2] SUNDANESE VOWEL SIGN PAMEPET..SUNDANESE VOWEL SIGN PANEULEUNG {0x1BAA, 0x1BAA, prSpacingMark}, // Mc SUNDANESE SIGN PAMAAEH {0x1BAB, 0x1BAD, prExtend}, // Mn [3] SUNDANESE SIGN VIRAMA..SUNDANESE CONSONANT SIGN PASANGAN WA {0x1BE6, 0x1BE6, prExtend}, // Mn BATAK SIGN TOMPI {0x1BE7, 0x1BE7, prSpacingMark}, // Mc BATAK VOWEL SIGN E {0x1BE8, 0x1BE9, prExtend}, // Mn [2] BATAK VOWEL SIGN PAKPAK E..BATAK VOWEL SIGN EE {0x1BEA, 0x1BEC, prSpacingMark}, // Mc [3] BATAK VOWEL SIGN I..BATAK VOWEL SIGN O {0x1BED, 0x1BED, prExtend}, // Mn BATAK VOWEL SIGN KARO O {0x1BEE, 0x1BEE, prSpacingMark}, // Mc BATAK VOWEL SIGN U {0x1BEF, 0x1BF1, prExtend}, // Mn [3] BATAK VOWEL SIGN U FOR SIMALUNGUN SA..BATAK CONSONANT SIGN H {0x1BF2, 0x1BF3, prSpacingMark}, // Mc [2] BATAK PANGOLAT..BATAK PANONGONAN {0x1C24, 0x1C2B, prSpacingMark}, // Mc [8] LEPCHA SUBJOINED LETTER YA..LEPCHA VOWEL SIGN UU {0x1C2C, 0x1C33, prExtend}, // Mn [8] LEPCHA VOWEL SIGN E..LEPCHA CONSONANT SIGN T {0x1C34, 0x1C35, prSpacingMark}, // Mc [2] LEPCHA CONSONANT SIGN NYIN-DO..LEPCHA CONSONANT SIGN KANG {0x1C36, 0x1C37, prExtend}, // Mn [2] LEPCHA SIGN RAN..LEPCHA SIGN NUKTA {0x1CD0, 0x1CD2, prExtend}, // Mn [3] VEDIC TONE KARSHANA..VEDIC TONE PRENKHA {0x1CD4, 0x1CE0, prExtend}, // Mn [13] VEDIC SIGN YAJURVEDIC MIDLINE SVARITA..VEDIC TONE RIGVEDIC KASHMIRI INDEPENDENT SVARITA {0x1CE1, 0x1CE1, prSpacingMark}, // Mc VEDIC TONE ATHARVAVEDIC INDEPENDENT SVARITA {0x1CE2, 0x1CE8, prExtend}, // Mn [7] VEDIC SIGN VISARGA SVARITA..VEDIC SIGN VISARGA ANUDATTA WITH TAIL {0x1CED, 0x1CED, prExtend}, // Mn VEDIC SIGN TIRYAK {0x1CF4, 0x1CF4, prExtend}, // Mn VEDIC TONE CANDRA ABOVE {0x1CF7, 0x1CF7, prSpacingMark}, // Mc VEDIC SIGN ATIKRAMA {0x1CF8, 0x1CF9, prExtend}, // Mn [2] VEDIC TONE RING ABOVE..VEDIC TONE DOUBLE RING ABOVE {0x1DC0, 0x1DF9, prExtend}, // Mn [58] COMBINING DOTTED GRAVE ACCENT..COMBINING WIDE INVERTED BRIDGE BELOW {0x1DFB, 0x1DFF, prExtend}, // Mn [5] COMBINING DELETION MARK..COMBINING RIGHT ARROWHEAD AND DOWN ARROWHEAD BELOW {0x200B, 0x200B, prControl}, // Cf ZERO WIDTH SPACE {0x200C, 0x200C, prExtend}, // Cf ZERO WIDTH NON-JOINER {0x200D, 0x200D, prZWJ}, // Cf ZERO WIDTH JOINER {0x200E, 0x200F, prControl}, // Cf [2] LEFT-TO-RIGHT MARK..RIGHT-TO-LEFT MARK {0x2028, 0x2028, prControl}, // Zl LINE SEPARATOR {0x2029, 0x2029, prControl}, // Zp PARAGRAPH SEPARATOR {0x202A, 0x202E, prControl}, // Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-LEFT OVERRIDE {0x203C, 0x203C, 
prExtendedPictographic}, // 1.1 [1] (‼️) double exclamation mark {0x2049, 0x2049, prExtendedPictographic}, // 3.0 [1] (⁉️) exclamation question mark {0x2060, 0x2064, prControl}, // Cf [5] WORD JOINER..INVISIBLE PLUS {0x2065, 0x2065, prControl}, // Cn {0x2066, 0x206F, prControl}, // Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIGIT SHAPES {0x20D0, 0x20DC, prExtend}, // Mn [13] COMBINING LEFT HARPOON ABOVE..COMBINING FOUR DOTS ABOVE {0x20DD, 0x20E0, prExtend}, // Me [4] COMBINING ENCLOSING CIRCLE..COMBINING ENCLOSING CIRCLE BACKSLASH {0x20E1, 0x20E1, prExtend}, // Mn COMBINING LEFT RIGHT ARROW ABOVE {0x20E2, 0x20E4, prExtend}, // Me [3] COMBINING ENCLOSING SCREEN..COMBINING ENCLOSING UPWARD POINTING TRIANGLE {0x20E5, 0x20F0, prExtend}, // Mn [12] COMBINING REVERSE SOLIDUS OVERLAY..COMBINING ASTERISK ABOVE {0x2122, 0x2122, prExtendedPictographic}, // 1.1 [1] (™️) trade mark {0x2139, 0x2139, prExtendedPictographic}, // 3.0 [1] (ℹ️) information {0x2194, 0x2199, prExtendedPictographic}, // 1.1 [6] (↔️..↙️) left-right arrow..down-left arrow {0x21A9, 0x21AA, prExtendedPictographic}, // 1.1 [2] (↩️..↪️) right arrow curving left..left arrow curving right {0x231A, 0x231B, prExtendedPictographic}, // 1.1 [2] (⌚..⌛) watch..hourglass done {0x2328, 0x2328, prExtendedPictographic}, // 1.1 [1] (⌨️) keyboard {0x2388, 0x2388, prExtendedPictographic}, // 3.0 [1] (⎈) HELM SYMBOL {0x23CF, 0x23CF, prExtendedPictographic}, // 4.0 [1] (⏏️) eject button {0x23E9, 0x23F3, prExtendedPictographic}, // 6.0 [11] (⏩..⏳) fast-forward button..hourglass not done {0x23F8, 0x23FA, prExtendedPictographic}, // 7.0 [3] (⏸️..⏺️) pause button..record button {0x24C2, 0x24C2, prExtendedPictographic}, // 1.1 [1] (Ⓜ️) circled M {0x25AA, 0x25AB, prExtendedPictographic}, // 1.1 [2] (▪️..▫️) black small square..white small square {0x25B6, 0x25B6, prExtendedPictographic}, // 1.1 [1] (▶️) play button {0x25C0, 0x25C0, prExtendedPictographic}, // 1.1 [1] (◀️) reverse button {0x25FB, 0x25FE, prExtendedPictographic}, // 3.2 [4] (◻️..◾) white medium square..black medium-small square {0x2600, 0x2605, prExtendedPictographic}, // 1.1 [6] (☀️..★) sun..BLACK STAR {0x2607, 0x2612, prExtendedPictographic}, // 1.1 [12] (☇..☒) LIGHTNING..BALLOT BOX WITH X {0x2614, 0x2615, prExtendedPictographic}, // 4.0 [2] (☔..☕) umbrella with rain drops..hot beverage {0x2616, 0x2617, prExtendedPictographic}, // 3.2 [2] (☖..☗) WHITE SHOGI PIECE..BLACK SHOGI PIECE {0x2618, 0x2618, prExtendedPictographic}, // 4.1 [1] (☘️) shamrock {0x2619, 0x2619, prExtendedPictographic}, // 3.0 [1] (☙) REVERSED ROTATED FLORAL HEART BULLET {0x261A, 0x266F, prExtendedPictographic}, // 1.1 [86] (☚..♯) BLACK LEFT POINTING INDEX..MUSIC SHARP SIGN {0x2670, 0x2671, prExtendedPictographic}, // 3.0 [2] (♰..♱) WEST SYRIAC CROSS..EAST SYRIAC CROSS {0x2672, 0x267D, prExtendedPictographic}, // 3.2 [12] (♲..♽) UNIVERSAL RECYCLING SYMBOL..PARTIALLY-RECYCLED PAPER SYMBOL {0x267E, 0x267F, prExtendedPictographic}, // 4.1 [2] (♾️..♿) infinity..wheelchair symbol {0x2680, 0x2685, prExtendedPictographic}, // 3.2 [6] (⚀..⚅) DIE FACE-1..DIE FACE-6 {0x2690, 0x2691, prExtendedPictographic}, // 4.0 [2] (⚐..⚑) WHITE FLAG..BLACK FLAG {0x2692, 0x269C, prExtendedPictographic}, // 4.1 [11] (⚒️..⚜️) hammer and pick..fleur-de-lis {0x269D, 0x269D, prExtendedPictographic}, // 5.1 [1] (⚝) OUTLINED WHITE STAR {0x269E, 0x269F, prExtendedPictographic}, // 5.2 [2] (⚞..⚟) THREE LINES CONVERGING RIGHT..THREE LINES CONVERGING LEFT {0x26A0, 0x26A1, prExtendedPictographic}, // 4.0 [2] (⚠️..⚡) warning..high voltage {0x26A2, 0x26B1, 
prExtendedPictographic}, // 4.1 [16] (⚢..⚱️) DOUBLED FEMALE SIGN..funeral urn {0x26B2, 0x26B2, prExtendedPictographic}, // 5.0 [1] (⚲) NEUTER {0x26B3, 0x26BC, prExtendedPictographic}, // 5.1 [10] (⚳..⚼) CERES..SESQUIQUADRATE {0x26BD, 0x26BF, prExtendedPictographic}, // 5.2 [3] (⚽..⚿) soccer ball..SQUARED KEY {0x26C0, 0x26C3, prExtendedPictographic}, // 5.1 [4] (⛀..⛃) WHITE DRAUGHTS MAN..BLACK DRAUGHTS KING {0x26C4, 0x26CD, prExtendedPictographic}, // 5.2 [10] (⛄..⛍) snowman without snow..DISABLED CAR {0x26CE, 0x26CE, prExtendedPictographic}, // 6.0 [1] (⛎) Ophiuchus {0x26CF, 0x26E1, prExtendedPictographic}, // 5.2 [19] (⛏️..⛡) pick..RESTRICTED LEFT ENTRY-2 {0x26E2, 0x26E2, prExtendedPictographic}, // 6.0 [1] (⛢) ASTRONOMICAL SYMBOL FOR URANUS {0x26E3, 0x26E3, prExtendedPictographic}, // 5.2 [1] (⛣) HEAVY CIRCLE WITH STROKE AND TWO DOTS ABOVE {0x26E4, 0x26E7, prExtendedPictographic}, // 6.0 [4] (⛤..⛧) PENTAGRAM..INVERTED PENTAGRAM {0x26E8, 0x26FF, prExtendedPictographic}, // 5.2 [24] (⛨..⛿) BLACK CROSS ON SHIELD..WHITE FLAG WITH HORIZONTAL MIDDLE BLACK STRIPE {0x2700, 0x2700, prExtendedPictographic}, // 7.0 [1] (✀) BLACK SAFETY SCISSORS {0x2701, 0x2704, prExtendedPictographic}, // 1.1 [4] (✁..✄) UPPER BLADE SCISSORS..WHITE SCISSORS {0x2705, 0x2705, prExtendedPictographic}, // 6.0 [1] (✅) check mark button {0x2708, 0x2709, prExtendedPictographic}, // 1.1 [2] (✈️..✉️) airplane..envelope {0x270A, 0x270B, prExtendedPictographic}, // 6.0 [2] (✊..✋) raised fist..raised hand {0x270C, 0x2712, prExtendedPictographic}, // 1.1 [7] (✌️..✒️) victory hand..black nib {0x2714, 0x2714, prExtendedPictographic}, // 1.1 [1] (✔️) check mark {0x2716, 0x2716, prExtendedPictographic}, // 1.1 [1] (✖️) multiplication sign {0x271D, 0x271D, prExtendedPictographic}, // 1.1 [1] (✝️) latin cross {0x2721, 0x2721, prExtendedPictographic}, // 1.1 [1] (✡️) star of David {0x2728, 0x2728, prExtendedPictographic}, // 6.0 [1] (✨) sparkles {0x2733, 0x2734, prExtendedPictographic}, // 1.1 [2] (✳️..✴️) eight-spoked asterisk..eight-pointed star {0x2744, 0x2744, prExtendedPictographic}, // 1.1 [1] (❄️) snowflake {0x2747, 0x2747, prExtendedPictographic}, // 1.1 [1] (❇️) sparkle {0x274C, 0x274C, prExtendedPictographic}, // 6.0 [1] (❌) cross mark {0x274E, 0x274E, prExtendedPictographic}, // 6.0 [1] (❎) cross mark button {0x2753, 0x2755, prExtendedPictographic}, // 6.0 [3] (❓..❕) question mark..white exclamation mark {0x2757, 0x2757, prExtendedPictographic}, // 5.2 [1] (❗) exclamation mark {0x2763, 0x2767, prExtendedPictographic}, // 1.1 [5] (❣️..❧) heart exclamation..ROTATED FLORAL HEART BULLET {0x2795, 0x2797, prExtendedPictographic}, // 6.0 [3] (➕..➗) plus sign..division sign {0x27A1, 0x27A1, prExtendedPictographic}, // 1.1 [1] (➡️) right arrow {0x27B0, 0x27B0, prExtendedPictographic}, // 6.0 [1] (➰) curly loop {0x27BF, 0x27BF, prExtendedPictographic}, // 6.0 [1] (➿) double curly loop {0x2934, 0x2935, prExtendedPictographic}, // 3.2 [2] (⤴️..⤵️) right arrow curving up..right arrow curving down {0x2B05, 0x2B07, prExtendedPictographic}, // 4.0 [3] (⬅️..⬇️) left arrow..down arrow {0x2B1B, 0x2B1C, prExtendedPictographic}, // 5.1 [2] (⬛..⬜) black large square..white large square {0x2B50, 0x2B50, prExtendedPictographic}, // 5.1 [1] (⭐) star {0x2B55, 0x2B55, prExtendedPictographic}, // 5.2 [1] (⭕) hollow red circle {0x2CEF, 0x2CF1, prExtend}, // Mn [3] COPTIC COMBINING NI ABOVE..COPTIC COMBINING SPIRITUS LENIS {0x2D7F, 0x2D7F, prExtend}, // Mn TIFINAGH CONSONANT JOINER {0x2DE0, 0x2DFF, prExtend}, // Mn [32] COMBINING CYRILLIC LETTER 
BE..COMBINING CYRILLIC LETTER IOTIFIED BIG YUS {0x302A, 0x302D, prExtend}, // Mn [4] IDEOGRAPHIC LEVEL TONE MARK..IDEOGRAPHIC ENTERING TONE MARK {0x302E, 0x302F, prExtend}, // Mc [2] HANGUL SINGLE DOT TONE MARK..HANGUL DOUBLE DOT TONE MARK {0x3030, 0x3030, prExtendedPictographic}, // 1.1 [1] (〰️) wavy dash {0x303D, 0x303D, prExtendedPictographic}, // 3.2 [1] (〽️) part alternation mark {0x3099, 0x309A, prExtend}, // Mn [2] COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK..COMBINING KATAKANA-HIRAGANA SEMI-VOICED SOUND MARK {0x3297, 0x3297, prExtendedPictographic}, // 1.1 [1] (㊗️) Japanese “congratulations” button {0x3299, 0x3299, prExtendedPictographic}, // 1.1 [1] (㊙️) Japanese “secret” button {0xA66F, 0xA66F, prExtend}, // Mn COMBINING CYRILLIC VZMET {0xA670, 0xA672, prExtend}, // Me [3] COMBINING CYRILLIC TEN MILLIONS SIGN..COMBINING CYRILLIC THOUSAND MILLIONS SIGN {0xA674, 0xA67D, prExtend}, // Mn [10] COMBINING CYRILLIC LETTER UKRAINIAN IE..COMBINING CYRILLIC PAYEROK {0xA69E, 0xA69F, prExtend}, // Mn [2] COMBINING CYRILLIC LETTER EF..COMBINING CYRILLIC LETTER IOTIFIED E {0xA6F0, 0xA6F1, prExtend}, // Mn [2] BAMUM COMBINING MARK KOQNDON..BAMUM COMBINING MARK TUKWENTIS {0xA802, 0xA802, prExtend}, // Mn SYLOTI NAGRI SIGN DVISVARA {0xA806, 0xA806, prExtend}, // Mn SYLOTI NAGRI SIGN HASANTA {0xA80B, 0xA80B, prExtend}, // Mn SYLOTI NAGRI SIGN ANUSVARA {0xA823, 0xA824, prSpacingMark}, // Mc [2] SYLOTI NAGRI VOWEL SIGN A..SYLOTI NAGRI VOWEL SIGN I {0xA825, 0xA826, prExtend}, // Mn [2] SYLOTI NAGRI VOWEL SIGN U..SYLOTI NAGRI VOWEL SIGN E {0xA827, 0xA827, prSpacingMark}, // Mc SYLOTI NAGRI VOWEL SIGN OO {0xA880, 0xA881, prSpacingMark}, // Mc [2] SAURASHTRA SIGN ANUSVARA..SAURASHTRA SIGN VISARGA {0xA8B4, 0xA8C3, prSpacingMark}, // Mc [16] SAURASHTRA CONSONANT SIGN HAARU..SAURASHTRA VOWEL SIGN AU {0xA8C4, 0xA8C5, prExtend}, // Mn [2] SAURASHTRA SIGN VIRAMA..SAURASHTRA SIGN CANDRABINDU {0xA8E0, 0xA8F1, prExtend}, // Mn [18] COMBINING DEVANAGARI DIGIT ZERO..COMBINING DEVANAGARI SIGN AVAGRAHA {0xA8FF, 0xA8FF, prExtend}, // Mn DEVANAGARI VOWEL SIGN AY {0xA926, 0xA92D, prExtend}, // Mn [8] KAYAH LI VOWEL UE..KAYAH LI TONE CALYA PLOPHU {0xA947, 0xA951, prExtend}, // Mn [11] REJANG VOWEL SIGN I..REJANG CONSONANT SIGN R {0xA952, 0xA953, prSpacingMark}, // Mc [2] REJANG CONSONANT SIGN H..REJANG VIRAMA {0xA960, 0xA97C, prL}, // Lo [29] HANGUL CHOSEONG TIKEUT-MIEUM..HANGUL CHOSEONG SSANGYEORINHIEUH {0xA980, 0xA982, prExtend}, // Mn [3] JAVANESE SIGN PANYANGGA..JAVANESE SIGN LAYAR {0xA983, 0xA983, prSpacingMark}, // Mc JAVANESE SIGN WIGNYAN {0xA9B3, 0xA9B3, prExtend}, // Mn JAVANESE SIGN CECAK TELU {0xA9B4, 0xA9B5, prSpacingMark}, // Mc [2] JAVANESE VOWEL SIGN TARUNG..JAVANESE VOWEL SIGN TOLONG {0xA9B6, 0xA9B9, prExtend}, // Mn [4] JAVANESE VOWEL SIGN WULU..JAVANESE VOWEL SIGN SUKU MENDUT {0xA9BA, 0xA9BB, prSpacingMark}, // Mc [2] JAVANESE VOWEL SIGN TALING..JAVANESE VOWEL SIGN DIRGA MURE {0xA9BC, 0xA9BD, prExtend}, // Mn [2] JAVANESE VOWEL SIGN PEPET..JAVANESE CONSONANT SIGN KERET {0xA9BE, 0xA9C0, prSpacingMark}, // Mc [3] JAVANESE CONSONANT SIGN PENGKAL..JAVANESE PANGKON {0xA9E5, 0xA9E5, prExtend}, // Mn MYANMAR SIGN SHAN SAW {0xAA29, 0xAA2E, prExtend}, // Mn [6] CHAM VOWEL SIGN AA..CHAM VOWEL SIGN OE {0xAA2F, 0xAA30, prSpacingMark}, // Mc [2] CHAM VOWEL SIGN O..CHAM VOWEL SIGN AI {0xAA31, 0xAA32, prExtend}, // Mn [2] CHAM VOWEL SIGN AU..CHAM VOWEL SIGN UE {0xAA33, 0xAA34, prSpacingMark}, // Mc [2] CHAM CONSONANT SIGN YA..CHAM CONSONANT SIGN RA {0xAA35, 0xAA36, prExtend}, // Mn [2] CHAM CONSONANT SIGN LA..CHAM 
CONSONANT SIGN WA {0xAA43, 0xAA43, prExtend}, // Mn CHAM CONSONANT SIGN FINAL NG {0xAA4C, 0xAA4C, prExtend}, // Mn CHAM CONSONANT SIGN FINAL M {0xAA4D, 0xAA4D, prSpacingMark}, // Mc CHAM CONSONANT SIGN FINAL H {0xAA7C, 0xAA7C, prExtend}, // Mn MYANMAR SIGN TAI LAING TONE-2 {0xAAB0, 0xAAB0, prExtend}, // Mn TAI VIET MAI KANG {0xAAB2, 0xAAB4, prExtend}, // Mn [3] TAI VIET VOWEL I..TAI VIET VOWEL U {0xAAB7, 0xAAB8, prExtend}, // Mn [2] TAI VIET MAI KHIT..TAI VIET VOWEL IA {0xAABE, 0xAABF, prExtend}, // Mn [2] TAI VIET VOWEL AM..TAI VIET TONE MAI EK {0xAAC1, 0xAAC1, prExtend}, // Mn TAI VIET TONE MAI THO {0xAAEB, 0xAAEB, prSpacingMark}, // Mc MEETEI MAYEK VOWEL SIGN II {0xAAEC, 0xAAED, prExtend}, // Mn [2] MEETEI MAYEK VOWEL SIGN UU..MEETEI MAYEK VOWEL SIGN AAI {0xAAEE, 0xAAEF, prSpacingMark}, // Mc [2] MEETEI MAYEK VOWEL SIGN AU..MEETEI MAYEK VOWEL SIGN AAU {0xAAF5, 0xAAF5, prSpacingMark}, // Mc MEETEI MAYEK VOWEL SIGN VISARGA {0xAAF6, 0xAAF6, prExtend}, // Mn MEETEI MAYEK VIRAMA {0xABE3, 0xABE4, prSpacingMark}, // Mc [2] MEETEI MAYEK VOWEL SIGN ONAP..MEETEI MAYEK VOWEL SIGN INAP {0xABE5, 0xABE5, prExtend}, // Mn MEETEI MAYEK VOWEL SIGN ANAP {0xABE6, 0xABE7, prSpacingMark}, // Mc [2] MEETEI MAYEK VOWEL SIGN YENAP..MEETEI MAYEK VOWEL SIGN SOUNAP {0xABE8, 0xABE8, prExtend}, // Mn MEETEI MAYEK VOWEL SIGN UNAP {0xABE9, 0xABEA, prSpacingMark}, // Mc [2] MEETEI MAYEK VOWEL SIGN CHEINAP..MEETEI MAYEK VOWEL SIGN NUNG {0xABEC, 0xABEC, prSpacingMark}, // Mc MEETEI MAYEK LUM IYEK {0xABED, 0xABED, prExtend}, // Mn MEETEI MAYEK APUN IYEK {0xAC00, 0xAC00, prLV}, // Lo HANGUL SYLLABLE GA {0xAC01, 0xAC1B, prLVT}, // Lo [27] HANGUL SYLLABLE GAG..HANGUL SYLLABLE GAH {0xAC1C, 0xAC1C, prLV}, // Lo HANGUL SYLLABLE GAE {0xAC1D, 0xAC37, prLVT}, // Lo [27] HANGUL SYLLABLE GAEG..HANGUL SYLLABLE GAEH {0xAC38, 0xAC38, prLV}, // Lo HANGUL SYLLABLE GYA {0xAC39, 0xAC53, prLVT}, // Lo [27] HANGUL SYLLABLE GYAG..HANGUL SYLLABLE GYAH {0xAC54, 0xAC54, prLV}, // Lo HANGUL SYLLABLE GYAE {0xAC55, 0xAC6F, prLVT}, // Lo [27] HANGUL SYLLABLE GYAEG..HANGUL SYLLABLE GYAEH {0xAC70, 0xAC70, prLV}, // Lo HANGUL SYLLABLE GEO {0xAC71, 0xAC8B, prLVT}, // Lo [27] HANGUL SYLLABLE GEOG..HANGUL SYLLABLE GEOH {0xAC8C, 0xAC8C, prLV}, // Lo HANGUL SYLLABLE GE {0xAC8D, 0xACA7, prLVT}, // Lo [27] HANGUL SYLLABLE GEG..HANGUL SYLLABLE GEH {0xACA8, 0xACA8, prLV}, // Lo HANGUL SYLLABLE GYEO {0xACA9, 0xACC3, prLVT}, // Lo [27] HANGUL SYLLABLE GYEOG..HANGUL SYLLABLE GYEOH {0xACC4, 0xACC4, prLV}, // Lo HANGUL SYLLABLE GYE {0xACC5, 0xACDF, prLVT}, // Lo [27] HANGUL SYLLABLE GYEG..HANGUL SYLLABLE GYEH {0xACE0, 0xACE0, prLV}, // Lo HANGUL SYLLABLE GO {0xACE1, 0xACFB, prLVT}, // Lo [27] HANGUL SYLLABLE GOG..HANGUL SYLLABLE GOH {0xACFC, 0xACFC, prLV}, // Lo HANGUL SYLLABLE GWA {0xACFD, 0xAD17, prLVT}, // Lo [27] HANGUL SYLLABLE GWAG..HANGUL SYLLABLE GWAH {0xAD18, 0xAD18, prLV}, // Lo HANGUL SYLLABLE GWAE {0xAD19, 0xAD33, prLVT}, // Lo [27] HANGUL SYLLABLE GWAEG..HANGUL SYLLABLE GWAEH {0xAD34, 0xAD34, prLV}, // Lo HANGUL SYLLABLE GOE {0xAD35, 0xAD4F, prLVT}, // Lo [27] HANGUL SYLLABLE GOEG..HANGUL SYLLABLE GOEH {0xAD50, 0xAD50, prLV}, // Lo HANGUL SYLLABLE GYO {0xAD51, 0xAD6B, prLVT}, // Lo [27] HANGUL SYLLABLE GYOG..HANGUL SYLLABLE GYOH {0xAD6C, 0xAD6C, prLV}, // Lo HANGUL SYLLABLE GU {0xAD6D, 0xAD87, prLVT}, // Lo [27] HANGUL SYLLABLE GUG..HANGUL SYLLABLE GUH {0xAD88, 0xAD88, prLV}, // Lo HANGUL SYLLABLE GWEO {0xAD89, 0xADA3, prLVT}, // Lo [27] HANGUL SYLLABLE GWEOG..HANGUL SYLLABLE GWEOH {0xADA4, 0xADA4, prLV}, // Lo HANGUL SYLLABLE GWE {0xADA5, 0xADBF, 
prLVT}, // Lo [27] HANGUL SYLLABLE GWEG..HANGUL SYLLABLE GWEH {0xADC0, 0xADC0, prLV}, // Lo HANGUL SYLLABLE GWI {0xADC1, 0xADDB, prLVT}, // Lo [27] HANGUL SYLLABLE GWIG..HANGUL SYLLABLE GWIH {0xADDC, 0xADDC, prLV}, // Lo HANGUL SYLLABLE GYU {0xADDD, 0xADF7, prLVT}, // Lo [27] HANGUL SYLLABLE GYUG..HANGUL SYLLABLE GYUH {0xADF8, 0xADF8, prLV}, // Lo HANGUL SYLLABLE GEU {0xADF9, 0xAE13, prLVT}, // Lo [27] HANGUL SYLLABLE GEUG..HANGUL SYLLABLE GEUH {0xAE14, 0xAE14, prLV}, // Lo HANGUL SYLLABLE GYI {0xAE15, 0xAE2F, prLVT}, // Lo [27] HANGUL SYLLABLE GYIG..HANGUL SYLLABLE GYIH {0xAE30, 0xAE30, prLV}, // Lo HANGUL SYLLABLE GI {0xAE31, 0xAE4B, prLVT}, // Lo [27] HANGUL SYLLABLE GIG..HANGUL SYLLABLE GIH {0xAE4C, 0xAE4C, prLV}, // Lo HANGUL SYLLABLE GGA {0xAE4D, 0xAE67, prLVT}, // Lo [27] HANGUL SYLLABLE GGAG..HANGUL SYLLABLE GGAH {0xAE68, 0xAE68, prLV}, // Lo HANGUL SYLLABLE GGAE {0xAE69, 0xAE83, prLVT}, // Lo [27] HANGUL SYLLABLE GGAEG..HANGUL SYLLABLE GGAEH {0xAE84, 0xAE84, prLV}, // Lo HANGUL SYLLABLE GGYA {0xAE85, 0xAE9F, prLVT}, // Lo [27] HANGUL SYLLABLE GGYAG..HANGUL SYLLABLE GGYAH {0xAEA0, 0xAEA0, prLV}, // Lo HANGUL SYLLABLE GGYAE {0xAEA1, 0xAEBB, prLVT}, // Lo [27] HANGUL SYLLABLE GGYAEG..HANGUL SYLLABLE GGYAEH {0xAEBC, 0xAEBC, prLV}, // Lo HANGUL SYLLABLE GGEO {0xAEBD, 0xAED7, prLVT}, // Lo [27] HANGUL SYLLABLE GGEOG..HANGUL SYLLABLE GGEOH {0xAED8, 0xAED8, prLV}, // Lo HANGUL SYLLABLE GGE {0xAED9, 0xAEF3, prLVT}, // Lo [27] HANGUL SYLLABLE GGEG..HANGUL SYLLABLE GGEH {0xAEF4, 0xAEF4, prLV}, // Lo HANGUL SYLLABLE GGYEO {0xAEF5, 0xAF0F, prLVT}, // Lo [27] HANGUL SYLLABLE GGYEOG..HANGUL SYLLABLE GGYEOH {0xAF10, 0xAF10, prLV}, // Lo HANGUL SYLLABLE GGYE {0xAF11, 0xAF2B, prLVT}, // Lo [27] HANGUL SYLLABLE GGYEG..HANGUL SYLLABLE GGYEH {0xAF2C, 0xAF2C, prLV}, // Lo HANGUL SYLLABLE GGO {0xAF2D, 0xAF47, prLVT}, // Lo [27] HANGUL SYLLABLE GGOG..HANGUL SYLLABLE GGOH {0xAF48, 0xAF48, prLV}, // Lo HANGUL SYLLABLE GGWA {0xAF49, 0xAF63, prLVT}, // Lo [27] HANGUL SYLLABLE GGWAG..HANGUL SYLLABLE GGWAH {0xAF64, 0xAF64, prLV}, // Lo HANGUL SYLLABLE GGWAE {0xAF65, 0xAF7F, prLVT}, // Lo [27] HANGUL SYLLABLE GGWAEG..HANGUL SYLLABLE GGWAEH {0xAF80, 0xAF80, prLV}, // Lo HANGUL SYLLABLE GGOE {0xAF81, 0xAF9B, prLVT}, // Lo [27] HANGUL SYLLABLE GGOEG..HANGUL SYLLABLE GGOEH {0xAF9C, 0xAF9C, prLV}, // Lo HANGUL SYLLABLE GGYO {0xAF9D, 0xAFB7, prLVT}, // Lo [27] HANGUL SYLLABLE GGYOG..HANGUL SYLLABLE GGYOH {0xAFB8, 0xAFB8, prLV}, // Lo HANGUL SYLLABLE GGU {0xAFB9, 0xAFD3, prLVT}, // Lo [27] HANGUL SYLLABLE GGUG..HANGUL SYLLABLE GGUH {0xAFD4, 0xAFD4, prLV}, // Lo HANGUL SYLLABLE GGWEO {0xAFD5, 0xAFEF, prLVT}, // Lo [27] HANGUL SYLLABLE GGWEOG..HANGUL SYLLABLE GGWEOH {0xAFF0, 0xAFF0, prLV}, // Lo HANGUL SYLLABLE GGWE {0xAFF1, 0xB00B, prLVT}, // Lo [27] HANGUL SYLLABLE GGWEG..HANGUL SYLLABLE GGWEH {0xB00C, 0xB00C, prLV}, // Lo HANGUL SYLLABLE GGWI {0xB00D, 0xB027, prLVT}, // Lo [27] HANGUL SYLLABLE GGWIG..HANGUL SYLLABLE GGWIH {0xB028, 0xB028, prLV}, // Lo HANGUL SYLLABLE GGYU {0xB029, 0xB043, prLVT}, // Lo [27] HANGUL SYLLABLE GGYUG..HANGUL SYLLABLE GGYUH {0xB044, 0xB044, prLV}, // Lo HANGUL SYLLABLE GGEU {0xB045, 0xB05F, prLVT}, // Lo [27] HANGUL SYLLABLE GGEUG..HANGUL SYLLABLE GGEUH {0xB060, 0xB060, prLV}, // Lo HANGUL SYLLABLE GGYI {0xB061, 0xB07B, prLVT}, // Lo [27] HANGUL SYLLABLE GGYIG..HANGUL SYLLABLE GGYIH {0xB07C, 0xB07C, prLV}, // Lo HANGUL SYLLABLE GGI {0xB07D, 0xB097, prLVT}, // Lo [27] HANGUL SYLLABLE GGIG..HANGUL SYLLABLE GGIH {0xB098, 0xB098, prLV}, // Lo HANGUL SYLLABLE NA {0xB099, 0xB0B3, prLVT}, // Lo 
[27] HANGUL SYLLABLE NAG..HANGUL SYLLABLE NAH {0xB0B4, 0xB0B4, prLV}, // Lo HANGUL SYLLABLE NAE {0xB0B5, 0xB0CF, prLVT}, // Lo [27] HANGUL SYLLABLE NAEG..HANGUL SYLLABLE NAEH {0xB0D0, 0xB0D0, prLV}, // Lo HANGUL SYLLABLE NYA {0xB0D1, 0xB0EB, prLVT}, // Lo [27] HANGUL SYLLABLE NYAG..HANGUL SYLLABLE NYAH {0xB0EC, 0xB0EC, prLV}, // Lo HANGUL SYLLABLE NYAE {0xB0ED, 0xB107, prLVT}, // Lo [27] HANGUL SYLLABLE NYAEG..HANGUL SYLLABLE NYAEH {0xB108, 0xB108, prLV}, // Lo HANGUL SYLLABLE NEO {0xB109, 0xB123, prLVT}, // Lo [27] HANGUL SYLLABLE NEOG..HANGUL SYLLABLE NEOH {0xB124, 0xB124, prLV}, // Lo HANGUL SYLLABLE NE {0xB125, 0xB13F, prLVT}, // Lo [27] HANGUL SYLLABLE NEG..HANGUL SYLLABLE NEH {0xB140, 0xB140, prLV}, // Lo HANGUL SYLLABLE NYEO {0xB141, 0xB15B, prLVT}, // Lo [27] HANGUL SYLLABLE NYEOG..HANGUL SYLLABLE NYEOH {0xB15C, 0xB15C, prLV}, // Lo HANGUL SYLLABLE NYE {0xB15D, 0xB177, prLVT}, // Lo [27] HANGUL SYLLABLE NYEG..HANGUL SYLLABLE NYEH {0xB178, 0xB178, prLV}, // Lo HANGUL SYLLABLE NO {0xB179, 0xB193, prLVT}, // Lo [27] HANGUL SYLLABLE NOG..HANGUL SYLLABLE NOH {0xB194, 0xB194, prLV}, // Lo HANGUL SYLLABLE NWA {0xB195, 0xB1AF, prLVT}, // Lo [27] HANGUL SYLLABLE NWAG..HANGUL SYLLABLE NWAH {0xB1B0, 0xB1B0, prLV}, // Lo HANGUL SYLLABLE NWAE {0xB1B1, 0xB1CB, prLVT}, // Lo [27] HANGUL SYLLABLE NWAEG..HANGUL SYLLABLE NWAEH {0xB1CC, 0xB1CC, prLV}, // Lo HANGUL SYLLABLE NOE {0xB1CD, 0xB1E7, prLVT}, // Lo [27] HANGUL SYLLABLE NOEG..HANGUL SYLLABLE NOEH {0xB1E8, 0xB1E8, prLV}, // Lo HANGUL SYLLABLE NYO {0xB1E9, 0xB203, prLVT}, // Lo [27] HANGUL SYLLABLE NYOG..HANGUL SYLLABLE NYOH {0xB204, 0xB204, prLV}, // Lo HANGUL SYLLABLE NU {0xB205, 0xB21F, prLVT}, // Lo [27] HANGUL SYLLABLE NUG..HANGUL SYLLABLE NUH {0xB220, 0xB220, prLV}, // Lo HANGUL SYLLABLE NWEO {0xB221, 0xB23B, prLVT}, // Lo [27] HANGUL SYLLABLE NWEOG..HANGUL SYLLABLE NWEOH {0xB23C, 0xB23C, prLV}, // Lo HANGUL SYLLABLE NWE {0xB23D, 0xB257, prLVT}, // Lo [27] HANGUL SYLLABLE NWEG..HANGUL SYLLABLE NWEH {0xB258, 0xB258, prLV}, // Lo HANGUL SYLLABLE NWI {0xB259, 0xB273, prLVT}, // Lo [27] HANGUL SYLLABLE NWIG..HANGUL SYLLABLE NWIH {0xB274, 0xB274, prLV}, // Lo HANGUL SYLLABLE NYU {0xB275, 0xB28F, prLVT}, // Lo [27] HANGUL SYLLABLE NYUG..HANGUL SYLLABLE NYUH {0xB290, 0xB290, prLV}, // Lo HANGUL SYLLABLE NEU {0xB291, 0xB2AB, prLVT}, // Lo [27] HANGUL SYLLABLE NEUG..HANGUL SYLLABLE NEUH {0xB2AC, 0xB2AC, prLV}, // Lo HANGUL SYLLABLE NYI {0xB2AD, 0xB2C7, prLVT}, // Lo [27] HANGUL SYLLABLE NYIG..HANGUL SYLLABLE NYIH {0xB2C8, 0xB2C8, prLV}, // Lo HANGUL SYLLABLE NI {0xB2C9, 0xB2E3, prLVT}, // Lo [27] HANGUL SYLLABLE NIG..HANGUL SYLLABLE NIH {0xB2E4, 0xB2E4, prLV}, // Lo HANGUL SYLLABLE DA {0xB2E5, 0xB2FF, prLVT}, // Lo [27] HANGUL SYLLABLE DAG..HANGUL SYLLABLE DAH {0xB300, 0xB300, prLV}, // Lo HANGUL SYLLABLE DAE {0xB301, 0xB31B, prLVT}, // Lo [27] HANGUL SYLLABLE DAEG..HANGUL SYLLABLE DAEH {0xB31C, 0xB31C, prLV}, // Lo HANGUL SYLLABLE DYA {0xB31D, 0xB337, prLVT}, // Lo [27] HANGUL SYLLABLE DYAG..HANGUL SYLLABLE DYAH {0xB338, 0xB338, prLV}, // Lo HANGUL SYLLABLE DYAE {0xB339, 0xB353, prLVT}, // Lo [27] HANGUL SYLLABLE DYAEG..HANGUL SYLLABLE DYAEH {0xB354, 0xB354, prLV}, // Lo HANGUL SYLLABLE DEO {0xB355, 0xB36F, prLVT}, // Lo [27] HANGUL SYLLABLE DEOG..HANGUL SYLLABLE DEOH {0xB370, 0xB370, prLV}, // Lo HANGUL SYLLABLE DE {0xB371, 0xB38B, prLVT}, // Lo [27] HANGUL SYLLABLE DEG..HANGUL SYLLABLE DEH {0xB38C, 0xB38C, prLV}, // Lo HANGUL SYLLABLE DYEO {0xB38D, 0xB3A7, prLVT}, // Lo [27] HANGUL SYLLABLE DYEOG..HANGUL SYLLABLE DYEOH {0xB3A8, 0xB3A8, prLV}, 
// Lo HANGUL SYLLABLE DYE {0xB3A9, 0xB3C3, prLVT}, // Lo [27] HANGUL SYLLABLE DYEG..HANGUL SYLLABLE DYEH {0xB3C4, 0xB3C4, prLV}, // Lo HANGUL SYLLABLE DO {0xB3C5, 0xB3DF, prLVT}, // Lo [27] HANGUL SYLLABLE DOG..HANGUL SYLLABLE DOH {0xB3E0, 0xB3E0, prLV}, // Lo HANGUL SYLLABLE DWA {0xB3E1, 0xB3FB, prLVT}, // Lo [27] HANGUL SYLLABLE DWAG..HANGUL SYLLABLE DWAH {0xB3FC, 0xB3FC, prLV}, // Lo HANGUL SYLLABLE DWAE {0xB3FD, 0xB417, prLVT}, // Lo [27] HANGUL SYLLABLE DWAEG..HANGUL SYLLABLE DWAEH {0xB418, 0xB418, prLV}, // Lo HANGUL SYLLABLE DOE {0xB419, 0xB433, prLVT}, // Lo [27] HANGUL SYLLABLE DOEG..HANGUL SYLLABLE DOEH {0xB434, 0xB434, prLV}, // Lo HANGUL SYLLABLE DYO {0xB435, 0xB44F, prLVT}, // Lo [27] HANGUL SYLLABLE DYOG..HANGUL SYLLABLE DYOH {0xB450, 0xB450, prLV}, // Lo HANGUL SYLLABLE DU {0xB451, 0xB46B, prLVT}, // Lo [27] HANGUL SYLLABLE DUG..HANGUL SYLLABLE DUH {0xB46C, 0xB46C, prLV}, // Lo HANGUL SYLLABLE DWEO {0xB46D, 0xB487, prLVT}, // Lo [27] HANGUL SYLLABLE DWEOG..HANGUL SYLLABLE DWEOH {0xB488, 0xB488, prLV}, // Lo HANGUL SYLLABLE DWE {0xB489, 0xB4A3, prLVT}, // Lo [27] HANGUL SYLLABLE DWEG..HANGUL SYLLABLE DWEH {0xB4A4, 0xB4A4, prLV}, // Lo HANGUL SYLLABLE DWI {0xB4A5, 0xB4BF, prLVT}, // Lo [27] HANGUL SYLLABLE DWIG..HANGUL SYLLABLE DWIH {0xB4C0, 0xB4C0, prLV}, // Lo HANGUL SYLLABLE DYU {0xB4C1, 0xB4DB, prLVT}, // Lo [27] HANGUL SYLLABLE DYUG..HANGUL SYLLABLE DYUH {0xB4DC, 0xB4DC, prLV}, // Lo HANGUL SYLLABLE DEU {0xB4DD, 0xB4F7, prLVT}, // Lo [27] HANGUL SYLLABLE DEUG..HANGUL SYLLABLE DEUH {0xB4F8, 0xB4F8, prLV}, // Lo HANGUL SYLLABLE DYI {0xB4F9, 0xB513, prLVT}, // Lo [27] HANGUL SYLLABLE DYIG..HANGUL SYLLABLE DYIH {0xB514, 0xB514, prLV}, // Lo HANGUL SYLLABLE DI {0xB515, 0xB52F, prLVT}, // Lo [27] HANGUL SYLLABLE DIG..HANGUL SYLLABLE DIH {0xB530, 0xB530, prLV}, // Lo HANGUL SYLLABLE DDA {0xB531, 0xB54B, prLVT}, // Lo [27] HANGUL SYLLABLE DDAG..HANGUL SYLLABLE DDAH {0xB54C, 0xB54C, prLV}, // Lo HANGUL SYLLABLE DDAE {0xB54D, 0xB567, prLVT}, // Lo [27] HANGUL SYLLABLE DDAEG..HANGUL SYLLABLE DDAEH {0xB568, 0xB568, prLV}, // Lo HANGUL SYLLABLE DDYA {0xB569, 0xB583, prLVT}, // Lo [27] HANGUL SYLLABLE DDYAG..HANGUL SYLLABLE DDYAH {0xB584, 0xB584, prLV}, // Lo HANGUL SYLLABLE DDYAE {0xB585, 0xB59F, prLVT}, // Lo [27] HANGUL SYLLABLE DDYAEG..HANGUL SYLLABLE DDYAEH {0xB5A0, 0xB5A0, prLV}, // Lo HANGUL SYLLABLE DDEO {0xB5A1, 0xB5BB, prLVT}, // Lo [27] HANGUL SYLLABLE DDEOG..HANGUL SYLLABLE DDEOH {0xB5BC, 0xB5BC, prLV}, // Lo HANGUL SYLLABLE DDE {0xB5BD, 0xB5D7, prLVT}, // Lo [27] HANGUL SYLLABLE DDEG..HANGUL SYLLABLE DDEH {0xB5D8, 0xB5D8, prLV}, // Lo HANGUL SYLLABLE DDYEO {0xB5D9, 0xB5F3, prLVT}, // Lo [27] HANGUL SYLLABLE DDYEOG..HANGUL SYLLABLE DDYEOH {0xB5F4, 0xB5F4, prLV}, // Lo HANGUL SYLLABLE DDYE {0xB5F5, 0xB60F, prLVT}, // Lo [27] HANGUL SYLLABLE DDYEG..HANGUL SYLLABLE DDYEH {0xB610, 0xB610, prLV}, // Lo HANGUL SYLLABLE DDO {0xB611, 0xB62B, prLVT}, // Lo [27] HANGUL SYLLABLE DDOG..HANGUL SYLLABLE DDOH {0xB62C, 0xB62C, prLV}, // Lo HANGUL SYLLABLE DDWA {0xB62D, 0xB647, prLVT}, // Lo [27] HANGUL SYLLABLE DDWAG..HANGUL SYLLABLE DDWAH {0xB648, 0xB648, prLV}, // Lo HANGUL SYLLABLE DDWAE {0xB649, 0xB663, prLVT}, // Lo [27] HANGUL SYLLABLE DDWAEG..HANGUL SYLLABLE DDWAEH {0xB664, 0xB664, prLV}, // Lo HANGUL SYLLABLE DDOE {0xB665, 0xB67F, prLVT}, // Lo [27] HANGUL SYLLABLE DDOEG..HANGUL SYLLABLE DDOEH {0xB680, 0xB680, prLV}, // Lo HANGUL SYLLABLE DDYO {0xB681, 0xB69B, prLVT}, // Lo [27] HANGUL SYLLABLE DDYOG..HANGUL SYLLABLE DDYOH {0xB69C, 0xB69C, prLV}, // Lo HANGUL SYLLABLE DDU {0xB69D, 
0xB6B7, prLVT}, // Lo [27] HANGUL SYLLABLE DDUG..HANGUL SYLLABLE DDUH {0xB6B8, 0xB6B8, prLV}, // Lo HANGUL SYLLABLE DDWEO {0xB6B9, 0xB6D3, prLVT}, // Lo [27] HANGUL SYLLABLE DDWEOG..HANGUL SYLLABLE DDWEOH {0xB6D4, 0xB6D4, prLV}, // Lo HANGUL SYLLABLE DDWE {0xB6D5, 0xB6EF, prLVT}, // Lo [27] HANGUL SYLLABLE DDWEG..HANGUL SYLLABLE DDWEH {0xB6F0, 0xB6F0, prLV}, // Lo HANGUL SYLLABLE DDWI {0xB6F1, 0xB70B, prLVT}, // Lo [27] HANGUL SYLLABLE DDWIG..HANGUL SYLLABLE DDWIH {0xB70C, 0xB70C, prLV}, // Lo HANGUL SYLLABLE DDYU {0xB70D, 0xB727, prLVT}, // Lo [27] HANGUL SYLLABLE DDYUG..HANGUL SYLLABLE DDYUH {0xB728, 0xB728, prLV}, // Lo HANGUL SYLLABLE DDEU {0xB729, 0xB743, prLVT}, // Lo [27] HANGUL SYLLABLE DDEUG..HANGUL SYLLABLE DDEUH {0xB744, 0xB744, prLV}, // Lo HANGUL SYLLABLE DDYI {0xB745, 0xB75F, prLVT}, // Lo [27] HANGUL SYLLABLE DDYIG..HANGUL SYLLABLE DDYIH {0xB760, 0xB760, prLV}, // Lo HANGUL SYLLABLE DDI {0xB761, 0xB77B, prLVT}, // Lo [27] HANGUL SYLLABLE DDIG..HANGUL SYLLABLE DDIH {0xB77C, 0xB77C, prLV}, // Lo HANGUL SYLLABLE RA {0xB77D, 0xB797, prLVT}, // Lo [27] HANGUL SYLLABLE RAG..HANGUL SYLLABLE RAH {0xB798, 0xB798, prLV}, // Lo HANGUL SYLLABLE RAE {0xB799, 0xB7B3, prLVT}, // Lo [27] HANGUL SYLLABLE RAEG..HANGUL SYLLABLE RAEH {0xB7B4, 0xB7B4, prLV}, // Lo HANGUL SYLLABLE RYA {0xB7B5, 0xB7CF, prLVT}, // Lo [27] HANGUL SYLLABLE RYAG..HANGUL SYLLABLE RYAH {0xB7D0, 0xB7D0, prLV}, // Lo HANGUL SYLLABLE RYAE {0xB7D1, 0xB7EB, prLVT}, // Lo [27] HANGUL SYLLABLE RYAEG..HANGUL SYLLABLE RYAEH {0xB7EC, 0xB7EC, prLV}, // Lo HANGUL SYLLABLE REO {0xB7ED, 0xB807, prLVT}, // Lo [27] HANGUL SYLLABLE REOG..HANGUL SYLLABLE REOH {0xB808, 0xB808, prLV}, // Lo HANGUL SYLLABLE RE {0xB809, 0xB823, prLVT}, // Lo [27] HANGUL SYLLABLE REG..HANGUL SYLLABLE REH {0xB824, 0xB824, prLV}, // Lo HANGUL SYLLABLE RYEO {0xB825, 0xB83F, prLVT}, // Lo [27] HANGUL SYLLABLE RYEOG..HANGUL SYLLABLE RYEOH {0xB840, 0xB840, prLV}, // Lo HANGUL SYLLABLE RYE {0xB841, 0xB85B, prLVT}, // Lo [27] HANGUL SYLLABLE RYEG..HANGUL SYLLABLE RYEH {0xB85C, 0xB85C, prLV}, // Lo HANGUL SYLLABLE RO {0xB85D, 0xB877, prLVT}, // Lo [27] HANGUL SYLLABLE ROG..HANGUL SYLLABLE ROH {0xB878, 0xB878, prLV}, // Lo HANGUL SYLLABLE RWA {0xB879, 0xB893, prLVT}, // Lo [27] HANGUL SYLLABLE RWAG..HANGUL SYLLABLE RWAH {0xB894, 0xB894, prLV}, // Lo HANGUL SYLLABLE RWAE {0xB895, 0xB8AF, prLVT}, // Lo [27] HANGUL SYLLABLE RWAEG..HANGUL SYLLABLE RWAEH {0xB8B0, 0xB8B0, prLV}, // Lo HANGUL SYLLABLE ROE {0xB8B1, 0xB8CB, prLVT}, // Lo [27] HANGUL SYLLABLE ROEG..HANGUL SYLLABLE ROEH {0xB8CC, 0xB8CC, prLV}, // Lo HANGUL SYLLABLE RYO {0xB8CD, 0xB8E7, prLVT}, // Lo [27] HANGUL SYLLABLE RYOG..HANGUL SYLLABLE RYOH {0xB8E8, 0xB8E8, prLV}, // Lo HANGUL SYLLABLE RU {0xB8E9, 0xB903, prLVT}, // Lo [27] HANGUL SYLLABLE RUG..HANGUL SYLLABLE RUH {0xB904, 0xB904, prLV}, // Lo HANGUL SYLLABLE RWEO {0xB905, 0xB91F, prLVT}, // Lo [27] HANGUL SYLLABLE RWEOG..HANGUL SYLLABLE RWEOH {0xB920, 0xB920, prLV}, // Lo HANGUL SYLLABLE RWE {0xB921, 0xB93B, prLVT}, // Lo [27] HANGUL SYLLABLE RWEG..HANGUL SYLLABLE RWEH {0xB93C, 0xB93C, prLV}, // Lo HANGUL SYLLABLE RWI {0xB93D, 0xB957, prLVT}, // Lo [27] HANGUL SYLLABLE RWIG..HANGUL SYLLABLE RWIH {0xB958, 0xB958, prLV}, // Lo HANGUL SYLLABLE RYU {0xB959, 0xB973, prLVT}, // Lo [27] HANGUL SYLLABLE RYUG..HANGUL SYLLABLE RYUH {0xB974, 0xB974, prLV}, // Lo HANGUL SYLLABLE REU {0xB975, 0xB98F, prLVT}, // Lo [27] HANGUL SYLLABLE REUG..HANGUL SYLLABLE REUH {0xB990, 0xB990, prLV}, // Lo HANGUL SYLLABLE RYI {0xB991, 0xB9AB, prLVT}, // Lo [27] HANGUL SYLLABLE 
RYIG..HANGUL SYLLABLE RYIH {0xB9AC, 0xB9AC, prLV}, // Lo HANGUL SYLLABLE RI {0xB9AD, 0xB9C7, prLVT}, // Lo [27] HANGUL SYLLABLE RIG..HANGUL SYLLABLE RIH {0xB9C8, 0xB9C8, prLV}, // Lo HANGUL SYLLABLE MA {0xB9C9, 0xB9E3, prLVT}, // Lo [27] HANGUL SYLLABLE MAG..HANGUL SYLLABLE MAH {0xB9E4, 0xB9E4, prLV}, // Lo HANGUL SYLLABLE MAE {0xB9E5, 0xB9FF, prLVT}, // Lo [27] HANGUL SYLLABLE MAEG..HANGUL SYLLABLE MAEH {0xBA00, 0xBA00, prLV}, // Lo HANGUL SYLLABLE MYA {0xBA01, 0xBA1B, prLVT}, // Lo [27] HANGUL SYLLABLE MYAG..HANGUL SYLLABLE MYAH {0xBA1C, 0xBA1C, prLV}, // Lo HANGUL SYLLABLE MYAE {0xBA1D, 0xBA37, prLVT}, // Lo [27] HANGUL SYLLABLE MYAEG..HANGUL SYLLABLE MYAEH {0xBA38, 0xBA38, prLV}, // Lo HANGUL SYLLABLE MEO {0xBA39, 0xBA53, prLVT}, // Lo [27] HANGUL SYLLABLE MEOG..HANGUL SYLLABLE MEOH {0xBA54, 0xBA54, prLV}, // Lo HANGUL SYLLABLE ME {0xBA55, 0xBA6F, prLVT}, // Lo [27] HANGUL SYLLABLE MEG..HANGUL SYLLABLE MEH {0xBA70, 0xBA70, prLV}, // Lo HANGUL SYLLABLE MYEO {0xBA71, 0xBA8B, prLVT}, // Lo [27] HANGUL SYLLABLE MYEOG..HANGUL SYLLABLE MYEOH {0xBA8C, 0xBA8C, prLV}, // Lo HANGUL SYLLABLE MYE {0xBA8D, 0xBAA7, prLVT}, // Lo [27] HANGUL SYLLABLE MYEG..HANGUL SYLLABLE MYEH {0xBAA8, 0xBAA8, prLV}, // Lo HANGUL SYLLABLE MO {0xBAA9, 0xBAC3, prLVT}, // Lo [27] HANGUL SYLLABLE MOG..HANGUL SYLLABLE MOH {0xBAC4, 0xBAC4, prLV}, // Lo HANGUL SYLLABLE MWA {0xBAC5, 0xBADF, prLVT}, // Lo [27] HANGUL SYLLABLE MWAG..HANGUL SYLLABLE MWAH {0xBAE0, 0xBAE0, prLV}, // Lo HANGUL SYLLABLE MWAE {0xBAE1, 0xBAFB, prLVT}, // Lo [27] HANGUL SYLLABLE MWAEG..HANGUL SYLLABLE MWAEH {0xBAFC, 0xBAFC, prLV}, // Lo HANGUL SYLLABLE MOE {0xBAFD, 0xBB17, prLVT}, // Lo [27] HANGUL SYLLABLE MOEG..HANGUL SYLLABLE MOEH {0xBB18, 0xBB18, prLV}, // Lo HANGUL SYLLABLE MYO {0xBB19, 0xBB33, prLVT}, // Lo [27] HANGUL SYLLABLE MYOG..HANGUL SYLLABLE MYOH {0xBB34, 0xBB34, prLV}, // Lo HANGUL SYLLABLE MU {0xBB35, 0xBB4F, prLVT}, // Lo [27] HANGUL SYLLABLE MUG..HANGUL SYLLABLE MUH {0xBB50, 0xBB50, prLV}, // Lo HANGUL SYLLABLE MWEO {0xBB51, 0xBB6B, prLVT}, // Lo [27] HANGUL SYLLABLE MWEOG..HANGUL SYLLABLE MWEOH {0xBB6C, 0xBB6C, prLV}, // Lo HANGUL SYLLABLE MWE {0xBB6D, 0xBB87, prLVT}, // Lo [27] HANGUL SYLLABLE MWEG..HANGUL SYLLABLE MWEH {0xBB88, 0xBB88, prLV}, // Lo HANGUL SYLLABLE MWI {0xBB89, 0xBBA3, prLVT}, // Lo [27] HANGUL SYLLABLE MWIG..HANGUL SYLLABLE MWIH {0xBBA4, 0xBBA4, prLV}, // Lo HANGUL SYLLABLE MYU {0xBBA5, 0xBBBF, prLVT}, // Lo [27] HANGUL SYLLABLE MYUG..HANGUL SYLLABLE MYUH {0xBBC0, 0xBBC0, prLV}, // Lo HANGUL SYLLABLE MEU {0xBBC1, 0xBBDB, prLVT}, // Lo [27] HANGUL SYLLABLE MEUG..HANGUL SYLLABLE MEUH {0xBBDC, 0xBBDC, prLV}, // Lo HANGUL SYLLABLE MYI {0xBBDD, 0xBBF7, prLVT}, // Lo [27] HANGUL SYLLABLE MYIG..HANGUL SYLLABLE MYIH {0xBBF8, 0xBBF8, prLV}, // Lo HANGUL SYLLABLE MI {0xBBF9, 0xBC13, prLVT}, // Lo [27] HANGUL SYLLABLE MIG..HANGUL SYLLABLE MIH {0xBC14, 0xBC14, prLV}, // Lo HANGUL SYLLABLE BA {0xBC15, 0xBC2F, prLVT}, // Lo [27] HANGUL SYLLABLE BAG..HANGUL SYLLABLE BAH {0xBC30, 0xBC30, prLV}, // Lo HANGUL SYLLABLE BAE {0xBC31, 0xBC4B, prLVT}, // Lo [27] HANGUL SYLLABLE BAEG..HANGUL SYLLABLE BAEH {0xBC4C, 0xBC4C, prLV}, // Lo HANGUL SYLLABLE BYA {0xBC4D, 0xBC67, prLVT}, // Lo [27] HANGUL SYLLABLE BYAG..HANGUL SYLLABLE BYAH {0xBC68, 0xBC68, prLV}, // Lo HANGUL SYLLABLE BYAE {0xBC69, 0xBC83, prLVT}, // Lo [27] HANGUL SYLLABLE BYAEG..HANGUL SYLLABLE BYAEH {0xBC84, 0xBC84, prLV}, // Lo HANGUL SYLLABLE BEO {0xBC85, 0xBC9F, prLVT}, // Lo [27] HANGUL SYLLABLE BEOG..HANGUL SYLLABLE BEOH {0xBCA0, 0xBCA0, prLV}, // Lo HANGUL SYLLABLE BE 
{0xBCA1, 0xBCBB, prLVT}, // Lo [27] HANGUL SYLLABLE BEG..HANGUL SYLLABLE BEH {0xBCBC, 0xBCBC, prLV}, // Lo HANGUL SYLLABLE BYEO {0xBCBD, 0xBCD7, prLVT}, // Lo [27] HANGUL SYLLABLE BYEOG..HANGUL SYLLABLE BYEOH {0xBCD8, 0xBCD8, prLV}, // Lo HANGUL SYLLABLE BYE {0xBCD9, 0xBCF3, prLVT}, // Lo [27] HANGUL SYLLABLE BYEG..HANGUL SYLLABLE BYEH {0xBCF4, 0xBCF4, prLV}, // Lo HANGUL SYLLABLE BO {0xBCF5, 0xBD0F, prLVT}, // Lo [27] HANGUL SYLLABLE BOG..HANGUL SYLLABLE BOH {0xBD10, 0xBD10, prLV}, // Lo HANGUL SYLLABLE BWA {0xBD11, 0xBD2B, prLVT}, // Lo [27] HANGUL SYLLABLE BWAG..HANGUL SYLLABLE BWAH {0xBD2C, 0xBD2C, prLV}, // Lo HANGUL SYLLABLE BWAE {0xBD2D, 0xBD47, prLVT}, // Lo [27] HANGUL SYLLABLE BWAEG..HANGUL SYLLABLE BWAEH {0xBD48, 0xBD48, prLV}, // Lo HANGUL SYLLABLE BOE {0xBD49, 0xBD63, prLVT}, // Lo [27] HANGUL SYLLABLE BOEG..HANGUL SYLLABLE BOEH {0xBD64, 0xBD64, prLV}, // Lo HANGUL SYLLABLE BYO {0xBD65, 0xBD7F, prLVT}, // Lo [27] HANGUL SYLLABLE BYOG..HANGUL SYLLABLE BYOH {0xBD80, 0xBD80, prLV}, // Lo HANGUL SYLLABLE BU {0xBD81, 0xBD9B, prLVT}, // Lo [27] HANGUL SYLLABLE BUG..HANGUL SYLLABLE BUH {0xBD9C, 0xBD9C, prLV}, // Lo HANGUL SYLLABLE BWEO {0xBD9D, 0xBDB7, prLVT}, // Lo [27] HANGUL SYLLABLE BWEOG..HANGUL SYLLABLE BWEOH {0xBDB8, 0xBDB8, prLV}, // Lo HANGUL SYLLABLE BWE {0xBDB9, 0xBDD3, prLVT}, // Lo [27] HANGUL SYLLABLE BWEG..HANGUL SYLLABLE BWEH {0xBDD4, 0xBDD4, prLV}, // Lo HANGUL SYLLABLE BWI {0xBDD5, 0xBDEF, prLVT}, // Lo [27] HANGUL SYLLABLE BWIG..HANGUL SYLLABLE BWIH {0xBDF0, 0xBDF0, prLV}, // Lo HANGUL SYLLABLE BYU {0xBDF1, 0xBE0B, prLVT}, // Lo [27] HANGUL SYLLABLE BYUG..HANGUL SYLLABLE BYUH {0xBE0C, 0xBE0C, prLV}, // Lo HANGUL SYLLABLE BEU {0xBE0D, 0xBE27, prLVT}, // Lo [27] HANGUL SYLLABLE BEUG..HANGUL SYLLABLE BEUH {0xBE28, 0xBE28, prLV}, // Lo HANGUL SYLLABLE BYI {0xBE29, 0xBE43, prLVT}, // Lo [27] HANGUL SYLLABLE BYIG..HANGUL SYLLABLE BYIH {0xBE44, 0xBE44, prLV}, // Lo HANGUL SYLLABLE BI {0xBE45, 0xBE5F, prLVT}, // Lo [27] HANGUL SYLLABLE BIG..HANGUL SYLLABLE BIH {0xBE60, 0xBE60, prLV}, // Lo HANGUL SYLLABLE BBA {0xBE61, 0xBE7B, prLVT}, // Lo [27] HANGUL SYLLABLE BBAG..HANGUL SYLLABLE BBAH {0xBE7C, 0xBE7C, prLV}, // Lo HANGUL SYLLABLE BBAE {0xBE7D, 0xBE97, prLVT}, // Lo [27] HANGUL SYLLABLE BBAEG..HANGUL SYLLABLE BBAEH {0xBE98, 0xBE98, prLV}, // Lo HANGUL SYLLABLE BBYA {0xBE99, 0xBEB3, prLVT}, // Lo [27] HANGUL SYLLABLE BBYAG..HANGUL SYLLABLE BBYAH {0xBEB4, 0xBEB4, prLV}, // Lo HANGUL SYLLABLE BBYAE {0xBEB5, 0xBECF, prLVT}, // Lo [27] HANGUL SYLLABLE BBYAEG..HANGUL SYLLABLE BBYAEH {0xBED0, 0xBED0, prLV}, // Lo HANGUL SYLLABLE BBEO {0xBED1, 0xBEEB, prLVT}, // Lo [27] HANGUL SYLLABLE BBEOG..HANGUL SYLLABLE BBEOH {0xBEEC, 0xBEEC, prLV}, // Lo HANGUL SYLLABLE BBE {0xBEED, 0xBF07, prLVT}, // Lo [27] HANGUL SYLLABLE BBEG..HANGUL SYLLABLE BBEH {0xBF08, 0xBF08, prLV}, // Lo HANGUL SYLLABLE BBYEO {0xBF09, 0xBF23, prLVT}, // Lo [27] HANGUL SYLLABLE BBYEOG..HANGUL SYLLABLE BBYEOH {0xBF24, 0xBF24, prLV}, // Lo HANGUL SYLLABLE BBYE {0xBF25, 0xBF3F, prLVT}, // Lo [27] HANGUL SYLLABLE BBYEG..HANGUL SYLLABLE BBYEH {0xBF40, 0xBF40, prLV}, // Lo HANGUL SYLLABLE BBO {0xBF41, 0xBF5B, prLVT}, // Lo [27] HANGUL SYLLABLE BBOG..HANGUL SYLLABLE BBOH {0xBF5C, 0xBF5C, prLV}, // Lo HANGUL SYLLABLE BBWA {0xBF5D, 0xBF77, prLVT}, // Lo [27] HANGUL SYLLABLE BBWAG..HANGUL SYLLABLE BBWAH {0xBF78, 0xBF78, prLV}, // Lo HANGUL SYLLABLE BBWAE {0xBF79, 0xBF93, prLVT}, // Lo [27] HANGUL SYLLABLE BBWAEG..HANGUL SYLLABLE BBWAEH {0xBF94, 0xBF94, prLV}, // Lo HANGUL SYLLABLE BBOE {0xBF95, 0xBFAF, prLVT}, // Lo [27] 
HANGUL SYLLABLE BBOEG..HANGUL SYLLABLE BBOEH {0xBFB0, 0xBFB0, prLV}, // Lo HANGUL SYLLABLE BBYO {0xBFB1, 0xBFCB, prLVT}, // Lo [27] HANGUL SYLLABLE BBYOG..HANGUL SYLLABLE BBYOH {0xBFCC, 0xBFCC, prLV}, // Lo HANGUL SYLLABLE BBU {0xBFCD, 0xBFE7, prLVT}, // Lo [27] HANGUL SYLLABLE BBUG..HANGUL SYLLABLE BBUH {0xBFE8, 0xBFE8, prLV}, // Lo HANGUL SYLLABLE BBWEO {0xBFE9, 0xC003, prLVT}, // Lo [27] HANGUL SYLLABLE BBWEOG..HANGUL SYLLABLE BBWEOH {0xC004, 0xC004, prLV}, // Lo HANGUL SYLLABLE BBWE {0xC005, 0xC01F, prLVT}, // Lo [27] HANGUL SYLLABLE BBWEG..HANGUL SYLLABLE BBWEH {0xC020, 0xC020, prLV}, // Lo HANGUL SYLLABLE BBWI {0xC021, 0xC03B, prLVT}, // Lo [27] HANGUL SYLLABLE BBWIG..HANGUL SYLLABLE BBWIH {0xC03C, 0xC03C, prLV}, // Lo HANGUL SYLLABLE BBYU {0xC03D, 0xC057, prLVT}, // Lo [27] HANGUL SYLLABLE BBYUG..HANGUL SYLLABLE BBYUH {0xC058, 0xC058, prLV}, // Lo HANGUL SYLLABLE BBEU {0xC059, 0xC073, prLVT}, // Lo [27] HANGUL SYLLABLE BBEUG..HANGUL SYLLABLE BBEUH {0xC074, 0xC074, prLV}, // Lo HANGUL SYLLABLE BBYI {0xC075, 0xC08F, prLVT}, // Lo [27] HANGUL SYLLABLE BBYIG..HANGUL SYLLABLE BBYIH {0xC090, 0xC090, prLV}, // Lo HANGUL SYLLABLE BBI {0xC091, 0xC0AB, prLVT}, // Lo [27] HANGUL SYLLABLE BBIG..HANGUL SYLLABLE BBIH {0xC0AC, 0xC0AC, prLV}, // Lo HANGUL SYLLABLE SA {0xC0AD, 0xC0C7, prLVT}, // Lo [27] HANGUL SYLLABLE SAG..HANGUL SYLLABLE SAH {0xC0C8, 0xC0C8, prLV}, // Lo HANGUL SYLLABLE SAE {0xC0C9, 0xC0E3, prLVT}, // Lo [27] HANGUL SYLLABLE SAEG..HANGUL SYLLABLE SAEH {0xC0E4, 0xC0E4, prLV}, // Lo HANGUL SYLLABLE SYA {0xC0E5, 0xC0FF, prLVT}, // Lo [27] HANGUL SYLLABLE SYAG..HANGUL SYLLABLE SYAH {0xC100, 0xC100, prLV}, // Lo HANGUL SYLLABLE SYAE {0xC101, 0xC11B, prLVT}, // Lo [27] HANGUL SYLLABLE SYAEG..HANGUL SYLLABLE SYAEH {0xC11C, 0xC11C, prLV}, // Lo HANGUL SYLLABLE SEO {0xC11D, 0xC137, prLVT}, // Lo [27] HANGUL SYLLABLE SEOG..HANGUL SYLLABLE SEOH {0xC138, 0xC138, prLV}, // Lo HANGUL SYLLABLE SE {0xC139, 0xC153, prLVT}, // Lo [27] HANGUL SYLLABLE SEG..HANGUL SYLLABLE SEH {0xC154, 0xC154, prLV}, // Lo HANGUL SYLLABLE SYEO {0xC155, 0xC16F, prLVT}, // Lo [27] HANGUL SYLLABLE SYEOG..HANGUL SYLLABLE SYEOH {0xC170, 0xC170, prLV}, // Lo HANGUL SYLLABLE SYE {0xC171, 0xC18B, prLVT}, // Lo [27] HANGUL SYLLABLE SYEG..HANGUL SYLLABLE SYEH {0xC18C, 0xC18C, prLV}, // Lo HANGUL SYLLABLE SO {0xC18D, 0xC1A7, prLVT}, // Lo [27] HANGUL SYLLABLE SOG..HANGUL SYLLABLE SOH {0xC1A8, 0xC1A8, prLV}, // Lo HANGUL SYLLABLE SWA {0xC1A9, 0xC1C3, prLVT}, // Lo [27] HANGUL SYLLABLE SWAG..HANGUL SYLLABLE SWAH {0xC1C4, 0xC1C4, prLV}, // Lo HANGUL SYLLABLE SWAE {0xC1C5, 0xC1DF, prLVT}, // Lo [27] HANGUL SYLLABLE SWAEG..HANGUL SYLLABLE SWAEH {0xC1E0, 0xC1E0, prLV}, // Lo HANGUL SYLLABLE SOE {0xC1E1, 0xC1FB, prLVT}, // Lo [27] HANGUL SYLLABLE SOEG..HANGUL SYLLABLE SOEH {0xC1FC, 0xC1FC, prLV}, // Lo HANGUL SYLLABLE SYO {0xC1FD, 0xC217, prLVT}, // Lo [27] HANGUL SYLLABLE SYOG..HANGUL SYLLABLE SYOH {0xC218, 0xC218, prLV}, // Lo HANGUL SYLLABLE SU {0xC219, 0xC233, prLVT}, // Lo [27] HANGUL SYLLABLE SUG..HANGUL SYLLABLE SUH {0xC234, 0xC234, prLV}, // Lo HANGUL SYLLABLE SWEO {0xC235, 0xC24F, prLVT}, // Lo [27] HANGUL SYLLABLE SWEOG..HANGUL SYLLABLE SWEOH {0xC250, 0xC250, prLV}, // Lo HANGUL SYLLABLE SWE {0xC251, 0xC26B, prLVT}, // Lo [27] HANGUL SYLLABLE SWEG..HANGUL SYLLABLE SWEH {0xC26C, 0xC26C, prLV}, // Lo HANGUL SYLLABLE SWI {0xC26D, 0xC287, prLVT}, // Lo [27] HANGUL SYLLABLE SWIG..HANGUL SYLLABLE SWIH {0xC288, 0xC288, prLV}, // Lo HANGUL SYLLABLE SYU {0xC289, 0xC2A3, prLVT}, // Lo [27] HANGUL SYLLABLE SYUG..HANGUL SYLLABLE SYUH 
{0xC2A4, 0xC2A4, prLV}, // Lo HANGUL SYLLABLE SEU {0xC2A5, 0xC2BF, prLVT}, // Lo [27] HANGUL SYLLABLE SEUG..HANGUL SYLLABLE SEUH {0xC2C0, 0xC2C0, prLV}, // Lo HANGUL SYLLABLE SYI {0xC2C1, 0xC2DB, prLVT}, // Lo [27] HANGUL SYLLABLE SYIG..HANGUL SYLLABLE SYIH {0xC2DC, 0xC2DC, prLV}, // Lo HANGUL SYLLABLE SI {0xC2DD, 0xC2F7, prLVT}, // Lo [27] HANGUL SYLLABLE SIG..HANGUL SYLLABLE SIH {0xC2F8, 0xC2F8, prLV}, // Lo HANGUL SYLLABLE SSA {0xC2F9, 0xC313, prLVT}, // Lo [27] HANGUL SYLLABLE SSAG..HANGUL SYLLABLE SSAH {0xC314, 0xC314, prLV}, // Lo HANGUL SYLLABLE SSAE {0xC315, 0xC32F, prLVT}, // Lo [27] HANGUL SYLLABLE SSAEG..HANGUL SYLLABLE SSAEH {0xC330, 0xC330, prLV}, // Lo HANGUL SYLLABLE SSYA {0xC331, 0xC34B, prLVT}, // Lo [27] HANGUL SYLLABLE SSYAG..HANGUL SYLLABLE SSYAH {0xC34C, 0xC34C, prLV}, // Lo HANGUL SYLLABLE SSYAE {0xC34D, 0xC367, prLVT}, // Lo [27] HANGUL SYLLABLE SSYAEG..HANGUL SYLLABLE SSYAEH {0xC368, 0xC368, prLV}, // Lo HANGUL SYLLABLE SSEO {0xC369, 0xC383, prLVT}, // Lo [27] HANGUL SYLLABLE SSEOG..HANGUL SYLLABLE SSEOH {0xC384, 0xC384, prLV}, // Lo HANGUL SYLLABLE SSE {0xC385, 0xC39F, prLVT}, // Lo [27] HANGUL SYLLABLE SSEG..HANGUL SYLLABLE SSEH {0xC3A0, 0xC3A0, prLV}, // Lo HANGUL SYLLABLE SSYEO {0xC3A1, 0xC3BB, prLVT}, // Lo [27] HANGUL SYLLABLE SSYEOG..HANGUL SYLLABLE SSYEOH {0xC3BC, 0xC3BC, prLV}, // Lo HANGUL SYLLABLE SSYE {0xC3BD, 0xC3D7, prLVT}, // Lo [27] HANGUL SYLLABLE SSYEG..HANGUL SYLLABLE SSYEH {0xC3D8, 0xC3D8, prLV}, // Lo HANGUL SYLLABLE SSO {0xC3D9, 0xC3F3, prLVT}, // Lo [27] HANGUL SYLLABLE SSOG..HANGUL SYLLABLE SSOH {0xC3F4, 0xC3F4, prLV}, // Lo HANGUL SYLLABLE SSWA {0xC3F5, 0xC40F, prLVT}, // Lo [27] HANGUL SYLLABLE SSWAG..HANGUL SYLLABLE SSWAH {0xC410, 0xC410, prLV}, // Lo HANGUL SYLLABLE SSWAE {0xC411, 0xC42B, prLVT}, // Lo [27] HANGUL SYLLABLE SSWAEG..HANGUL SYLLABLE SSWAEH {0xC42C, 0xC42C, prLV}, // Lo HANGUL SYLLABLE SSOE {0xC42D, 0xC447, prLVT}, // Lo [27] HANGUL SYLLABLE SSOEG..HANGUL SYLLABLE SSOEH {0xC448, 0xC448, prLV}, // Lo HANGUL SYLLABLE SSYO {0xC449, 0xC463, prLVT}, // Lo [27] HANGUL SYLLABLE SSYOG..HANGUL SYLLABLE SSYOH {0xC464, 0xC464, prLV}, // Lo HANGUL SYLLABLE SSU {0xC465, 0xC47F, prLVT}, // Lo [27] HANGUL SYLLABLE SSUG..HANGUL SYLLABLE SSUH {0xC480, 0xC480, prLV}, // Lo HANGUL SYLLABLE SSWEO {0xC481, 0xC49B, prLVT}, // Lo [27] HANGUL SYLLABLE SSWEOG..HANGUL SYLLABLE SSWEOH {0xC49C, 0xC49C, prLV}, // Lo HANGUL SYLLABLE SSWE {0xC49D, 0xC4B7, prLVT}, // Lo [27] HANGUL SYLLABLE SSWEG..HANGUL SYLLABLE SSWEH {0xC4B8, 0xC4B8, prLV}, // Lo HANGUL SYLLABLE SSWI {0xC4B9, 0xC4D3, prLVT}, // Lo [27] HANGUL SYLLABLE SSWIG..HANGUL SYLLABLE SSWIH {0xC4D4, 0xC4D4, prLV}, // Lo HANGUL SYLLABLE SSYU {0xC4D5, 0xC4EF, prLVT}, // Lo [27] HANGUL SYLLABLE SSYUG..HANGUL SYLLABLE SSYUH {0xC4F0, 0xC4F0, prLV}, // Lo HANGUL SYLLABLE SSEU {0xC4F1, 0xC50B, prLVT}, // Lo [27] HANGUL SYLLABLE SSEUG..HANGUL SYLLABLE SSEUH {0xC50C, 0xC50C, prLV}, // Lo HANGUL SYLLABLE SSYI {0xC50D, 0xC527, prLVT}, // Lo [27] HANGUL SYLLABLE SSYIG..HANGUL SYLLABLE SSYIH {0xC528, 0xC528, prLV}, // Lo HANGUL SYLLABLE SSI {0xC529, 0xC543, prLVT}, // Lo [27] HANGUL SYLLABLE SSIG..HANGUL SYLLABLE SSIH {0xC544, 0xC544, prLV}, // Lo HANGUL SYLLABLE A {0xC545, 0xC55F, prLVT}, // Lo [27] HANGUL SYLLABLE AG..HANGUL SYLLABLE AH {0xC560, 0xC560, prLV}, // Lo HANGUL SYLLABLE AE {0xC561, 0xC57B, prLVT}, // Lo [27] HANGUL SYLLABLE AEG..HANGUL SYLLABLE AEH {0xC57C, 0xC57C, prLV}, // Lo HANGUL SYLLABLE YA {0xC57D, 0xC597, prLVT}, // Lo [27] HANGUL SYLLABLE YAG..HANGUL SYLLABLE YAH {0xC598, 0xC598, prLV}, // 
Lo HANGUL SYLLABLE YAE {0xC599, 0xC5B3, prLVT}, // Lo [27] HANGUL SYLLABLE YAEG..HANGUL SYLLABLE YAEH {0xC5B4, 0xC5B4, prLV}, // Lo HANGUL SYLLABLE EO {0xC5B5, 0xC5CF, prLVT}, // Lo [27] HANGUL SYLLABLE EOG..HANGUL SYLLABLE EOH {0xC5D0, 0xC5D0, prLV}, // Lo HANGUL SYLLABLE E {0xC5D1, 0xC5EB, prLVT}, // Lo [27] HANGUL SYLLABLE EG..HANGUL SYLLABLE EH {0xC5EC, 0xC5EC, prLV}, // Lo HANGUL SYLLABLE YEO {0xC5ED, 0xC607, prLVT}, // Lo [27] HANGUL SYLLABLE YEOG..HANGUL SYLLABLE YEOH {0xC608, 0xC608, prLV}, // Lo HANGUL SYLLABLE YE {0xC609, 0xC623, prLVT}, // Lo [27] HANGUL SYLLABLE YEG..HANGUL SYLLABLE YEH {0xC624, 0xC624, prLV}, // Lo HANGUL SYLLABLE O {0xC625, 0xC63F, prLVT}, // Lo [27] HANGUL SYLLABLE OG..HANGUL SYLLABLE OH {0xC640, 0xC640, prLV}, // Lo HANGUL SYLLABLE WA {0xC641, 0xC65B, prLVT}, // Lo [27] HANGUL SYLLABLE WAG..HANGUL SYLLABLE WAH {0xC65C, 0xC65C, prLV}, // Lo HANGUL SYLLABLE WAE {0xC65D, 0xC677, prLVT}, // Lo [27] HANGUL SYLLABLE WAEG..HANGUL SYLLABLE WAEH {0xC678, 0xC678, prLV}, // Lo HANGUL SYLLABLE OE {0xC679, 0xC693, prLVT}, // Lo [27] HANGUL SYLLABLE OEG..HANGUL SYLLABLE OEH {0xC694, 0xC694, prLV}, // Lo HANGUL SYLLABLE YO {0xC695, 0xC6AF, prLVT}, // Lo [27] HANGUL SYLLABLE YOG..HANGUL SYLLABLE YOH {0xC6B0, 0xC6B0, prLV}, // Lo HANGUL SYLLABLE U {0xC6B1, 0xC6CB, prLVT}, // Lo [27] HANGUL SYLLABLE UG..HANGUL SYLLABLE UH {0xC6CC, 0xC6CC, prLV}, // Lo HANGUL SYLLABLE WEO {0xC6CD, 0xC6E7, prLVT}, // Lo [27] HANGUL SYLLABLE WEOG..HANGUL SYLLABLE WEOH {0xC6E8, 0xC6E8, prLV}, // Lo HANGUL SYLLABLE WE {0xC6E9, 0xC703, prLVT}, // Lo [27] HANGUL SYLLABLE WEG..HANGUL SYLLABLE WEH {0xC704, 0xC704, prLV}, // Lo HANGUL SYLLABLE WI {0xC705, 0xC71F, prLVT}, // Lo [27] HANGUL SYLLABLE WIG..HANGUL SYLLABLE WIH {0xC720, 0xC720, prLV}, // Lo HANGUL SYLLABLE YU {0xC721, 0xC73B, prLVT}, // Lo [27] HANGUL SYLLABLE YUG..HANGUL SYLLABLE YUH {0xC73C, 0xC73C, prLV}, // Lo HANGUL SYLLABLE EU {0xC73D, 0xC757, prLVT}, // Lo [27] HANGUL SYLLABLE EUG..HANGUL SYLLABLE EUH {0xC758, 0xC758, prLV}, // Lo HANGUL SYLLABLE YI {0xC759, 0xC773, prLVT}, // Lo [27] HANGUL SYLLABLE YIG..HANGUL SYLLABLE YIH {0xC774, 0xC774, prLV}, // Lo HANGUL SYLLABLE I {0xC775, 0xC78F, prLVT}, // Lo [27] HANGUL SYLLABLE IG..HANGUL SYLLABLE IH {0xC790, 0xC790, prLV}, // Lo HANGUL SYLLABLE JA {0xC791, 0xC7AB, prLVT}, // Lo [27] HANGUL SYLLABLE JAG..HANGUL SYLLABLE JAH {0xC7AC, 0xC7AC, prLV}, // Lo HANGUL SYLLABLE JAE {0xC7AD, 0xC7C7, prLVT}, // Lo [27] HANGUL SYLLABLE JAEG..HANGUL SYLLABLE JAEH {0xC7C8, 0xC7C8, prLV}, // Lo HANGUL SYLLABLE JYA {0xC7C9, 0xC7E3, prLVT}, // Lo [27] HANGUL SYLLABLE JYAG..HANGUL SYLLABLE JYAH {0xC7E4, 0xC7E4, prLV}, // Lo HANGUL SYLLABLE JYAE {0xC7E5, 0xC7FF, prLVT}, // Lo [27] HANGUL SYLLABLE JYAEG..HANGUL SYLLABLE JYAEH {0xC800, 0xC800, prLV}, // Lo HANGUL SYLLABLE JEO {0xC801, 0xC81B, prLVT}, // Lo [27] HANGUL SYLLABLE JEOG..HANGUL SYLLABLE JEOH {0xC81C, 0xC81C, prLV}, // Lo HANGUL SYLLABLE JE {0xC81D, 0xC837, prLVT}, // Lo [27] HANGUL SYLLABLE JEG..HANGUL SYLLABLE JEH {0xC838, 0xC838, prLV}, // Lo HANGUL SYLLABLE JYEO {0xC839, 0xC853, prLVT}, // Lo [27] HANGUL SYLLABLE JYEOG..HANGUL SYLLABLE JYEOH {0xC854, 0xC854, prLV}, // Lo HANGUL SYLLABLE JYE {0xC855, 0xC86F, prLVT}, // Lo [27] HANGUL SYLLABLE JYEG..HANGUL SYLLABLE JYEH {0xC870, 0xC870, prLV}, // Lo HANGUL SYLLABLE JO {0xC871, 0xC88B, prLVT}, // Lo [27] HANGUL SYLLABLE JOG..HANGUL SYLLABLE JOH {0xC88C, 0xC88C, prLV}, // Lo HANGUL SYLLABLE JWA {0xC88D, 0xC8A7, prLVT}, // Lo [27] HANGUL SYLLABLE JWAG..HANGUL SYLLABLE JWAH {0xC8A8, 0xC8A8, prLV}, // 
Lo HANGUL SYLLABLE JWAE {0xC8A9, 0xC8C3, prLVT}, // Lo [27] HANGUL SYLLABLE JWAEG..HANGUL SYLLABLE JWAEH {0xC8C4, 0xC8C4, prLV}, // Lo HANGUL SYLLABLE JOE {0xC8C5, 0xC8DF, prLVT}, // Lo [27] HANGUL SYLLABLE JOEG..HANGUL SYLLABLE JOEH {0xC8E0, 0xC8E0, prLV}, // Lo HANGUL SYLLABLE JYO {0xC8E1, 0xC8FB, prLVT}, // Lo [27] HANGUL SYLLABLE JYOG..HANGUL SYLLABLE JYOH {0xC8FC, 0xC8FC, prLV}, // Lo HANGUL SYLLABLE JU {0xC8FD, 0xC917, prLVT}, // Lo [27] HANGUL SYLLABLE JUG..HANGUL SYLLABLE JUH {0xC918, 0xC918, prLV}, // Lo HANGUL SYLLABLE JWEO {0xC919, 0xC933, prLVT}, // Lo [27] HANGUL SYLLABLE JWEOG..HANGUL SYLLABLE JWEOH {0xC934, 0xC934, prLV}, // Lo HANGUL SYLLABLE JWE {0xC935, 0xC94F, prLVT}, // Lo [27] HANGUL SYLLABLE JWEG..HANGUL SYLLABLE JWEH {0xC950, 0xC950, prLV}, // Lo HANGUL SYLLABLE JWI {0xC951, 0xC96B, prLVT}, // Lo [27] HANGUL SYLLABLE JWIG..HANGUL SYLLABLE JWIH {0xC96C, 0xC96C, prLV}, // Lo HANGUL SYLLABLE JYU {0xC96D, 0xC987, prLVT}, // Lo [27] HANGUL SYLLABLE JYUG..HANGUL SYLLABLE JYUH {0xC988, 0xC988, prLV}, // Lo HANGUL SYLLABLE JEU {0xC989, 0xC9A3, prLVT}, // Lo [27] HANGUL SYLLABLE JEUG..HANGUL SYLLABLE JEUH {0xC9A4, 0xC9A4, prLV}, // Lo HANGUL SYLLABLE JYI {0xC9A5, 0xC9BF, prLVT}, // Lo [27] HANGUL SYLLABLE JYIG..HANGUL SYLLABLE JYIH {0xC9C0, 0xC9C0, prLV}, // Lo HANGUL SYLLABLE JI {0xC9C1, 0xC9DB, prLVT}, // Lo [27] HANGUL SYLLABLE JIG..HANGUL SYLLABLE JIH {0xC9DC, 0xC9DC, prLV}, // Lo HANGUL SYLLABLE JJA {0xC9DD, 0xC9F7, prLVT}, // Lo [27] HANGUL SYLLABLE JJAG..HANGUL SYLLABLE JJAH {0xC9F8, 0xC9F8, prLV}, // Lo HANGUL SYLLABLE JJAE {0xC9F9, 0xCA13, prLVT}, // Lo [27] HANGUL SYLLABLE JJAEG..HANGUL SYLLABLE JJAEH {0xCA14, 0xCA14, prLV}, // Lo HANGUL SYLLABLE JJYA {0xCA15, 0xCA2F, prLVT}, // Lo [27] HANGUL SYLLABLE JJYAG..HANGUL SYLLABLE JJYAH {0xCA30, 0xCA30, prLV}, // Lo HANGUL SYLLABLE JJYAE {0xCA31, 0xCA4B, prLVT}, // Lo [27] HANGUL SYLLABLE JJYAEG..HANGUL SYLLABLE JJYAEH {0xCA4C, 0xCA4C, prLV}, // Lo HANGUL SYLLABLE JJEO {0xCA4D, 0xCA67, prLVT}, // Lo [27] HANGUL SYLLABLE JJEOG..HANGUL SYLLABLE JJEOH {0xCA68, 0xCA68, prLV}, // Lo HANGUL SYLLABLE JJE {0xCA69, 0xCA83, prLVT}, // Lo [27] HANGUL SYLLABLE JJEG..HANGUL SYLLABLE JJEH {0xCA84, 0xCA84, prLV}, // Lo HANGUL SYLLABLE JJYEO {0xCA85, 0xCA9F, prLVT}, // Lo [27] HANGUL SYLLABLE JJYEOG..HANGUL SYLLABLE JJYEOH {0xCAA0, 0xCAA0, prLV}, // Lo HANGUL SYLLABLE JJYE {0xCAA1, 0xCABB, prLVT}, // Lo [27] HANGUL SYLLABLE JJYEG..HANGUL SYLLABLE JJYEH {0xCABC, 0xCABC, prLV}, // Lo HANGUL SYLLABLE JJO {0xCABD, 0xCAD7, prLVT}, // Lo [27] HANGUL SYLLABLE JJOG..HANGUL SYLLABLE JJOH {0xCAD8, 0xCAD8, prLV}, // Lo HANGUL SYLLABLE JJWA {0xCAD9, 0xCAF3, prLVT}, // Lo [27] HANGUL SYLLABLE JJWAG..HANGUL SYLLABLE JJWAH {0xCAF4, 0xCAF4, prLV}, // Lo HANGUL SYLLABLE JJWAE {0xCAF5, 0xCB0F, prLVT}, // Lo [27] HANGUL SYLLABLE JJWAEG..HANGUL SYLLABLE JJWAEH {0xCB10, 0xCB10, prLV}, // Lo HANGUL SYLLABLE JJOE {0xCB11, 0xCB2B, prLVT}, // Lo [27] HANGUL SYLLABLE JJOEG..HANGUL SYLLABLE JJOEH {0xCB2C, 0xCB2C, prLV}, // Lo HANGUL SYLLABLE JJYO {0xCB2D, 0xCB47, prLVT}, // Lo [27] HANGUL SYLLABLE JJYOG..HANGUL SYLLABLE JJYOH {0xCB48, 0xCB48, prLV}, // Lo HANGUL SYLLABLE JJU {0xCB49, 0xCB63, prLVT}, // Lo [27] HANGUL SYLLABLE JJUG..HANGUL SYLLABLE JJUH {0xCB64, 0xCB64, prLV}, // Lo HANGUL SYLLABLE JJWEO {0xCB65, 0xCB7F, prLVT}, // Lo [27] HANGUL SYLLABLE JJWEOG..HANGUL SYLLABLE JJWEOH {0xCB80, 0xCB80, prLV}, // Lo HANGUL SYLLABLE JJWE {0xCB81, 0xCB9B, prLVT}, // Lo [27] HANGUL SYLLABLE JJWEG..HANGUL SYLLABLE JJWEH {0xCB9C, 0xCB9C, prLV}, // Lo HANGUL SYLLABLE JJWI 
{0xCB9D, 0xCBB7, prLVT}, // Lo [27] HANGUL SYLLABLE JJWIG..HANGUL SYLLABLE JJWIH {0xCBB8, 0xCBB8, prLV}, // Lo HANGUL SYLLABLE JJYU {0xCBB9, 0xCBD3, prLVT}, // Lo [27] HANGUL SYLLABLE JJYUG..HANGUL SYLLABLE JJYUH {0xCBD4, 0xCBD4, prLV}, // Lo HANGUL SYLLABLE JJEU {0xCBD5, 0xCBEF, prLVT}, // Lo [27] HANGUL SYLLABLE JJEUG..HANGUL SYLLABLE JJEUH {0xCBF0, 0xCBF0, prLV}, // Lo HANGUL SYLLABLE JJYI {0xCBF1, 0xCC0B, prLVT}, // Lo [27] HANGUL SYLLABLE JJYIG..HANGUL SYLLABLE JJYIH {0xCC0C, 0xCC0C, prLV}, // Lo HANGUL SYLLABLE JJI {0xCC0D, 0xCC27, prLVT}, // Lo [27] HANGUL SYLLABLE JJIG..HANGUL SYLLABLE JJIH {0xCC28, 0xCC28, prLV}, // Lo HANGUL SYLLABLE CA {0xCC29, 0xCC43, prLVT}, // Lo [27] HANGUL SYLLABLE CAG..HANGUL SYLLABLE CAH {0xCC44, 0xCC44, prLV}, // Lo HANGUL SYLLABLE CAE {0xCC45, 0xCC5F, prLVT}, // Lo [27] HANGUL SYLLABLE CAEG..HANGUL SYLLABLE CAEH {0xCC60, 0xCC60, prLV}, // Lo HANGUL SYLLABLE CYA {0xCC61, 0xCC7B, prLVT}, // Lo [27] HANGUL SYLLABLE CYAG..HANGUL SYLLABLE CYAH {0xCC7C, 0xCC7C, prLV}, // Lo HANGUL SYLLABLE CYAE {0xCC7D, 0xCC97, prLVT}, // Lo [27] HANGUL SYLLABLE CYAEG..HANGUL SYLLABLE CYAEH {0xCC98, 0xCC98, prLV}, // Lo HANGUL SYLLABLE CEO {0xCC99, 0xCCB3, prLVT}, // Lo [27] HANGUL SYLLABLE CEOG..HANGUL SYLLABLE CEOH {0xCCB4, 0xCCB4, prLV}, // Lo HANGUL SYLLABLE CE {0xCCB5, 0xCCCF, prLVT}, // Lo [27] HANGUL SYLLABLE CEG..HANGUL SYLLABLE CEH {0xCCD0, 0xCCD0, prLV}, // Lo HANGUL SYLLABLE CYEO {0xCCD1, 0xCCEB, prLVT}, // Lo [27] HANGUL SYLLABLE CYEOG..HANGUL SYLLABLE CYEOH {0xCCEC, 0xCCEC, prLV}, // Lo HANGUL SYLLABLE CYE {0xCCED, 0xCD07, prLVT}, // Lo [27] HANGUL SYLLABLE CYEG..HANGUL SYLLABLE CYEH {0xCD08, 0xCD08, prLV}, // Lo HANGUL SYLLABLE CO {0xCD09, 0xCD23, prLVT}, // Lo [27] HANGUL SYLLABLE COG..HANGUL SYLLABLE COH {0xCD24, 0xCD24, prLV}, // Lo HANGUL SYLLABLE CWA {0xCD25, 0xCD3F, prLVT}, // Lo [27] HANGUL SYLLABLE CWAG..HANGUL SYLLABLE CWAH {0xCD40, 0xCD40, prLV}, // Lo HANGUL SYLLABLE CWAE {0xCD41, 0xCD5B, prLVT}, // Lo [27] HANGUL SYLLABLE CWAEG..HANGUL SYLLABLE CWAEH {0xCD5C, 0xCD5C, prLV}, // Lo HANGUL SYLLABLE COE {0xCD5D, 0xCD77, prLVT}, // Lo [27] HANGUL SYLLABLE COEG..HANGUL SYLLABLE COEH {0xCD78, 0xCD78, prLV}, // Lo HANGUL SYLLABLE CYO {0xCD79, 0xCD93, prLVT}, // Lo [27] HANGUL SYLLABLE CYOG..HANGUL SYLLABLE CYOH {0xCD94, 0xCD94, prLV}, // Lo HANGUL SYLLABLE CU {0xCD95, 0xCDAF, prLVT}, // Lo [27] HANGUL SYLLABLE CUG..HANGUL SYLLABLE CUH {0xCDB0, 0xCDB0, prLV}, // Lo HANGUL SYLLABLE CWEO {0xCDB1, 0xCDCB, prLVT}, // Lo [27] HANGUL SYLLABLE CWEOG..HANGUL SYLLABLE CWEOH {0xCDCC, 0xCDCC, prLV}, // Lo HANGUL SYLLABLE CWE {0xCDCD, 0xCDE7, prLVT}, // Lo [27] HANGUL SYLLABLE CWEG..HANGUL SYLLABLE CWEH {0xCDE8, 0xCDE8, prLV}, // Lo HANGUL SYLLABLE CWI {0xCDE9, 0xCE03, prLVT}, // Lo [27] HANGUL SYLLABLE CWIG..HANGUL SYLLABLE CWIH {0xCE04, 0xCE04, prLV}, // Lo HANGUL SYLLABLE CYU {0xCE05, 0xCE1F, prLVT}, // Lo [27] HANGUL SYLLABLE CYUG..HANGUL SYLLABLE CYUH {0xCE20, 0xCE20, prLV}, // Lo HANGUL SYLLABLE CEU {0xCE21, 0xCE3B, prLVT}, // Lo [27] HANGUL SYLLABLE CEUG..HANGUL SYLLABLE CEUH {0xCE3C, 0xCE3C, prLV}, // Lo HANGUL SYLLABLE CYI {0xCE3D, 0xCE57, prLVT}, // Lo [27] HANGUL SYLLABLE CYIG..HANGUL SYLLABLE CYIH {0xCE58, 0xCE58, prLV}, // Lo HANGUL SYLLABLE CI {0xCE59, 0xCE73, prLVT}, // Lo [27] HANGUL SYLLABLE CIG..HANGUL SYLLABLE CIH {0xCE74, 0xCE74, prLV}, // Lo HANGUL SYLLABLE KA {0xCE75, 0xCE8F, prLVT}, // Lo [27] HANGUL SYLLABLE KAG..HANGUL SYLLABLE KAH {0xCE90, 0xCE90, prLV}, // Lo HANGUL SYLLABLE KAE {0xCE91, 0xCEAB, prLVT}, // Lo [27] HANGUL SYLLABLE KAEG..HANGUL 
SYLLABLE KAEH {0xCEAC, 0xCEAC, prLV}, // Lo HANGUL SYLLABLE KYA {0xCEAD, 0xCEC7, prLVT}, // Lo [27] HANGUL SYLLABLE KYAG..HANGUL SYLLABLE KYAH {0xCEC8, 0xCEC8, prLV}, // Lo HANGUL SYLLABLE KYAE {0xCEC9, 0xCEE3, prLVT}, // Lo [27] HANGUL SYLLABLE KYAEG..HANGUL SYLLABLE KYAEH {0xCEE4, 0xCEE4, prLV}, // Lo HANGUL SYLLABLE KEO {0xCEE5, 0xCEFF, prLVT}, // Lo [27] HANGUL SYLLABLE KEOG..HANGUL SYLLABLE KEOH {0xCF00, 0xCF00, prLV}, // Lo HANGUL SYLLABLE KE {0xCF01, 0xCF1B, prLVT}, // Lo [27] HANGUL SYLLABLE KEG..HANGUL SYLLABLE KEH {0xCF1C, 0xCF1C, prLV}, // Lo HANGUL SYLLABLE KYEO {0xCF1D, 0xCF37, prLVT}, // Lo [27] HANGUL SYLLABLE KYEOG..HANGUL SYLLABLE KYEOH {0xCF38, 0xCF38, prLV}, // Lo HANGUL SYLLABLE KYE {0xCF39, 0xCF53, prLVT}, // Lo [27] HANGUL SYLLABLE KYEG..HANGUL SYLLABLE KYEH {0xCF54, 0xCF54, prLV}, // Lo HANGUL SYLLABLE KO {0xCF55, 0xCF6F, prLVT}, // Lo [27] HANGUL SYLLABLE KOG..HANGUL SYLLABLE KOH {0xCF70, 0xCF70, prLV}, // Lo HANGUL SYLLABLE KWA {0xCF71, 0xCF8B, prLVT}, // Lo [27] HANGUL SYLLABLE KWAG..HANGUL SYLLABLE KWAH {0xCF8C, 0xCF8C, prLV}, // Lo HANGUL SYLLABLE KWAE {0xCF8D, 0xCFA7, prLVT}, // Lo [27] HANGUL SYLLABLE KWAEG..HANGUL SYLLABLE KWAEH {0xCFA8, 0xCFA8, prLV}, // Lo HANGUL SYLLABLE KOE {0xCFA9, 0xCFC3, prLVT}, // Lo [27] HANGUL SYLLABLE KOEG..HANGUL SYLLABLE KOEH {0xCFC4, 0xCFC4, prLV}, // Lo HANGUL SYLLABLE KYO {0xCFC5, 0xCFDF, prLVT}, // Lo [27] HANGUL SYLLABLE KYOG..HANGUL SYLLABLE KYOH {0xCFE0, 0xCFE0, prLV}, // Lo HANGUL SYLLABLE KU {0xCFE1, 0xCFFB, prLVT}, // Lo [27] HANGUL SYLLABLE KUG..HANGUL SYLLABLE KUH {0xCFFC, 0xCFFC, prLV}, // Lo HANGUL SYLLABLE KWEO {0xCFFD, 0xD017, prLVT}, // Lo [27] HANGUL SYLLABLE KWEOG..HANGUL SYLLABLE KWEOH {0xD018, 0xD018, prLV}, // Lo HANGUL SYLLABLE KWE {0xD019, 0xD033, prLVT}, // Lo [27] HANGUL SYLLABLE KWEG..HANGUL SYLLABLE KWEH {0xD034, 0xD034, prLV}, // Lo HANGUL SYLLABLE KWI {0xD035, 0xD04F, prLVT}, // Lo [27] HANGUL SYLLABLE KWIG..HANGUL SYLLABLE KWIH {0xD050, 0xD050, prLV}, // Lo HANGUL SYLLABLE KYU {0xD051, 0xD06B, prLVT}, // Lo [27] HANGUL SYLLABLE KYUG..HANGUL SYLLABLE KYUH {0xD06C, 0xD06C, prLV}, // Lo HANGUL SYLLABLE KEU {0xD06D, 0xD087, prLVT}, // Lo [27] HANGUL SYLLABLE KEUG..HANGUL SYLLABLE KEUH {0xD088, 0xD088, prLV}, // Lo HANGUL SYLLABLE KYI {0xD089, 0xD0A3, prLVT}, // Lo [27] HANGUL SYLLABLE KYIG..HANGUL SYLLABLE KYIH {0xD0A4, 0xD0A4, prLV}, // Lo HANGUL SYLLABLE KI {0xD0A5, 0xD0BF, prLVT}, // Lo [27] HANGUL SYLLABLE KIG..HANGUL SYLLABLE KIH {0xD0C0, 0xD0C0, prLV}, // Lo HANGUL SYLLABLE TA {0xD0C1, 0xD0DB, prLVT}, // Lo [27] HANGUL SYLLABLE TAG..HANGUL SYLLABLE TAH {0xD0DC, 0xD0DC, prLV}, // Lo HANGUL SYLLABLE TAE {0xD0DD, 0xD0F7, prLVT}, // Lo [27] HANGUL SYLLABLE TAEG..HANGUL SYLLABLE TAEH {0xD0F8, 0xD0F8, prLV}, // Lo HANGUL SYLLABLE TYA {0xD0F9, 0xD113, prLVT}, // Lo [27] HANGUL SYLLABLE TYAG..HANGUL SYLLABLE TYAH {0xD114, 0xD114, prLV}, // Lo HANGUL SYLLABLE TYAE {0xD115, 0xD12F, prLVT}, // Lo [27] HANGUL SYLLABLE TYAEG..HANGUL SYLLABLE TYAEH {0xD130, 0xD130, prLV}, // Lo HANGUL SYLLABLE TEO {0xD131, 0xD14B, prLVT}, // Lo [27] HANGUL SYLLABLE TEOG..HANGUL SYLLABLE TEOH {0xD14C, 0xD14C, prLV}, // Lo HANGUL SYLLABLE TE {0xD14D, 0xD167, prLVT}, // Lo [27] HANGUL SYLLABLE TEG..HANGUL SYLLABLE TEH {0xD168, 0xD168, prLV}, // Lo HANGUL SYLLABLE TYEO {0xD169, 0xD183, prLVT}, // Lo [27] HANGUL SYLLABLE TYEOG..HANGUL SYLLABLE TYEOH {0xD184, 0xD184, prLV}, // Lo HANGUL SYLLABLE TYE {0xD185, 0xD19F, prLVT}, // Lo [27] HANGUL SYLLABLE TYEG..HANGUL SYLLABLE TYEH {0xD1A0, 0xD1A0, prLV}, // Lo HANGUL SYLLABLE TO {0xD1A1, 
0xD1BB, prLVT}, // Lo [27] HANGUL SYLLABLE TOG..HANGUL SYLLABLE TOH {0xD1BC, 0xD1BC, prLV}, // Lo HANGUL SYLLABLE TWA {0xD1BD, 0xD1D7, prLVT}, // Lo [27] HANGUL SYLLABLE TWAG..HANGUL SYLLABLE TWAH {0xD1D8, 0xD1D8, prLV}, // Lo HANGUL SYLLABLE TWAE {0xD1D9, 0xD1F3, prLVT}, // Lo [27] HANGUL SYLLABLE TWAEG..HANGUL SYLLABLE TWAEH {0xD1F4, 0xD1F4, prLV}, // Lo HANGUL SYLLABLE TOE {0xD1F5, 0xD20F, prLVT}, // Lo [27] HANGUL SYLLABLE TOEG..HANGUL SYLLABLE TOEH {0xD210, 0xD210, prLV}, // Lo HANGUL SYLLABLE TYO {0xD211, 0xD22B, prLVT}, // Lo [27] HANGUL SYLLABLE TYOG..HANGUL SYLLABLE TYOH {0xD22C, 0xD22C, prLV}, // Lo HANGUL SYLLABLE TU {0xD22D, 0xD247, prLVT}, // Lo [27] HANGUL SYLLABLE TUG..HANGUL SYLLABLE TUH {0xD248, 0xD248, prLV}, // Lo HANGUL SYLLABLE TWEO {0xD249, 0xD263, prLVT}, // Lo [27] HANGUL SYLLABLE TWEOG..HANGUL SYLLABLE TWEOH {0xD264, 0xD264, prLV}, // Lo HANGUL SYLLABLE TWE {0xD265, 0xD27F, prLVT}, // Lo [27] HANGUL SYLLABLE TWEG..HANGUL SYLLABLE TWEH {0xD280, 0xD280, prLV}, // Lo HANGUL SYLLABLE TWI {0xD281, 0xD29B, prLVT}, // Lo [27] HANGUL SYLLABLE TWIG..HANGUL SYLLABLE TWIH {0xD29C, 0xD29C, prLV}, // Lo HANGUL SYLLABLE TYU {0xD29D, 0xD2B7, prLVT}, // Lo [27] HANGUL SYLLABLE TYUG..HANGUL SYLLABLE TYUH {0xD2B8, 0xD2B8, prLV}, // Lo HANGUL SYLLABLE TEU {0xD2B9, 0xD2D3, prLVT}, // Lo [27] HANGUL SYLLABLE TEUG..HANGUL SYLLABLE TEUH {0xD2D4, 0xD2D4, prLV}, // Lo HANGUL SYLLABLE TYI {0xD2D5, 0xD2EF, prLVT}, // Lo [27] HANGUL SYLLABLE TYIG..HANGUL SYLLABLE TYIH {0xD2F0, 0xD2F0, prLV}, // Lo HANGUL SYLLABLE TI {0xD2F1, 0xD30B, prLVT}, // Lo [27] HANGUL SYLLABLE TIG..HANGUL SYLLABLE TIH {0xD30C, 0xD30C, prLV}, // Lo HANGUL SYLLABLE PA {0xD30D, 0xD327, prLVT}, // Lo [27] HANGUL SYLLABLE PAG..HANGUL SYLLABLE PAH {0xD328, 0xD328, prLV}, // Lo HANGUL SYLLABLE PAE {0xD329, 0xD343, prLVT}, // Lo [27] HANGUL SYLLABLE PAEG..HANGUL SYLLABLE PAEH {0xD344, 0xD344, prLV}, // Lo HANGUL SYLLABLE PYA {0xD345, 0xD35F, prLVT}, // Lo [27] HANGUL SYLLABLE PYAG..HANGUL SYLLABLE PYAH {0xD360, 0xD360, prLV}, // Lo HANGUL SYLLABLE PYAE {0xD361, 0xD37B, prLVT}, // Lo [27] HANGUL SYLLABLE PYAEG..HANGUL SYLLABLE PYAEH {0xD37C, 0xD37C, prLV}, // Lo HANGUL SYLLABLE PEO {0xD37D, 0xD397, prLVT}, // Lo [27] HANGUL SYLLABLE PEOG..HANGUL SYLLABLE PEOH {0xD398, 0xD398, prLV}, // Lo HANGUL SYLLABLE PE {0xD399, 0xD3B3, prLVT}, // Lo [27] HANGUL SYLLABLE PEG..HANGUL SYLLABLE PEH {0xD3B4, 0xD3B4, prLV}, // Lo HANGUL SYLLABLE PYEO {0xD3B5, 0xD3CF, prLVT}, // Lo [27] HANGUL SYLLABLE PYEOG..HANGUL SYLLABLE PYEOH {0xD3D0, 0xD3D0, prLV}, // Lo HANGUL SYLLABLE PYE {0xD3D1, 0xD3EB, prLVT}, // Lo [27] HANGUL SYLLABLE PYEG..HANGUL SYLLABLE PYEH {0xD3EC, 0xD3EC, prLV}, // Lo HANGUL SYLLABLE PO {0xD3ED, 0xD407, prLVT}, // Lo [27] HANGUL SYLLABLE POG..HANGUL SYLLABLE POH {0xD408, 0xD408, prLV}, // Lo HANGUL SYLLABLE PWA {0xD409, 0xD423, prLVT}, // Lo [27] HANGUL SYLLABLE PWAG..HANGUL SYLLABLE PWAH {0xD424, 0xD424, prLV}, // Lo HANGUL SYLLABLE PWAE {0xD425, 0xD43F, prLVT}, // Lo [27] HANGUL SYLLABLE PWAEG..HANGUL SYLLABLE PWAEH {0xD440, 0xD440, prLV}, // Lo HANGUL SYLLABLE POE {0xD441, 0xD45B, prLVT}, // Lo [27] HANGUL SYLLABLE POEG..HANGUL SYLLABLE POEH {0xD45C, 0xD45C, prLV}, // Lo HANGUL SYLLABLE PYO {0xD45D, 0xD477, prLVT}, // Lo [27] HANGUL SYLLABLE PYOG..HANGUL SYLLABLE PYOH {0xD478, 0xD478, prLV}, // Lo HANGUL SYLLABLE PU {0xD479, 0xD493, prLVT}, // Lo [27] HANGUL SYLLABLE PUG..HANGUL SYLLABLE PUH {0xD494, 0xD494, prLV}, // Lo HANGUL SYLLABLE PWEO {0xD495, 0xD4AF, prLVT}, // Lo [27] HANGUL SYLLABLE PWEOG..HANGUL SYLLABLE PWEOH 
{0xD4B0, 0xD4B0, prLV}, // Lo HANGUL SYLLABLE PWE {0xD4B1, 0xD4CB, prLVT}, // Lo [27] HANGUL SYLLABLE PWEG..HANGUL SYLLABLE PWEH {0xD4CC, 0xD4CC, prLV}, // Lo HANGUL SYLLABLE PWI {0xD4CD, 0xD4E7, prLVT}, // Lo [27] HANGUL SYLLABLE PWIG..HANGUL SYLLABLE PWIH {0xD4E8, 0xD4E8, prLV}, // Lo HANGUL SYLLABLE PYU {0xD4E9, 0xD503, prLVT}, // Lo [27] HANGUL SYLLABLE PYUG..HANGUL SYLLABLE PYUH {0xD504, 0xD504, prLV}, // Lo HANGUL SYLLABLE PEU {0xD505, 0xD51F, prLVT}, // Lo [27] HANGUL SYLLABLE PEUG..HANGUL SYLLABLE PEUH {0xD520, 0xD520, prLV}, // Lo HANGUL SYLLABLE PYI {0xD521, 0xD53B, prLVT}, // Lo [27] HANGUL SYLLABLE PYIG..HANGUL SYLLABLE PYIH {0xD53C, 0xD53C, prLV}, // Lo HANGUL SYLLABLE PI {0xD53D, 0xD557, prLVT}, // Lo [27] HANGUL SYLLABLE PIG..HANGUL SYLLABLE PIH {0xD558, 0xD558, prLV}, // Lo HANGUL SYLLABLE HA {0xD559, 0xD573, prLVT}, // Lo [27] HANGUL SYLLABLE HAG..HANGUL SYLLABLE HAH {0xD574, 0xD574, prLV}, // Lo HANGUL SYLLABLE HAE {0xD575, 0xD58F, prLVT}, // Lo [27] HANGUL SYLLABLE HAEG..HANGUL SYLLABLE HAEH {0xD590, 0xD590, prLV}, // Lo HANGUL SYLLABLE HYA {0xD591, 0xD5AB, prLVT}, // Lo [27] HANGUL SYLLABLE HYAG..HANGUL SYLLABLE HYAH {0xD5AC, 0xD5AC, prLV}, // Lo HANGUL SYLLABLE HYAE {0xD5AD, 0xD5C7, prLVT}, // Lo [27] HANGUL SYLLABLE HYAEG..HANGUL SYLLABLE HYAEH {0xD5C8, 0xD5C8, prLV}, // Lo HANGUL SYLLABLE HEO {0xD5C9, 0xD5E3, prLVT}, // Lo [27] HANGUL SYLLABLE HEOG..HANGUL SYLLABLE HEOH {0xD5E4, 0xD5E4, prLV}, // Lo HANGUL SYLLABLE HE {0xD5E5, 0xD5FF, prLVT}, // Lo [27] HANGUL SYLLABLE HEG..HANGUL SYLLABLE HEH {0xD600, 0xD600, prLV}, // Lo HANGUL SYLLABLE HYEO {0xD601, 0xD61B, prLVT}, // Lo [27] HANGUL SYLLABLE HYEOG..HANGUL SYLLABLE HYEOH {0xD61C, 0xD61C, prLV}, // Lo HANGUL SYLLABLE HYE {0xD61D, 0xD637, prLVT}, // Lo [27] HANGUL SYLLABLE HYEG..HANGUL SYLLABLE HYEH {0xD638, 0xD638, prLV}, // Lo HANGUL SYLLABLE HO {0xD639, 0xD653, prLVT}, // Lo [27] HANGUL SYLLABLE HOG..HANGUL SYLLABLE HOH {0xD654, 0xD654, prLV}, // Lo HANGUL SYLLABLE HWA {0xD655, 0xD66F, prLVT}, // Lo [27] HANGUL SYLLABLE HWAG..HANGUL SYLLABLE HWAH {0xD670, 0xD670, prLV}, // Lo HANGUL SYLLABLE HWAE {0xD671, 0xD68B, prLVT}, // Lo [27] HANGUL SYLLABLE HWAEG..HANGUL SYLLABLE HWAEH {0xD68C, 0xD68C, prLV}, // Lo HANGUL SYLLABLE HOE {0xD68D, 0xD6A7, prLVT}, // Lo [27] HANGUL SYLLABLE HOEG..HANGUL SYLLABLE HOEH {0xD6A8, 0xD6A8, prLV}, // Lo HANGUL SYLLABLE HYO {0xD6A9, 0xD6C3, prLVT}, // Lo [27] HANGUL SYLLABLE HYOG..HANGUL SYLLABLE HYOH {0xD6C4, 0xD6C4, prLV}, // Lo HANGUL SYLLABLE HU {0xD6C5, 0xD6DF, prLVT}, // Lo [27] HANGUL SYLLABLE HUG..HANGUL SYLLABLE HUH {0xD6E0, 0xD6E0, prLV}, // Lo HANGUL SYLLABLE HWEO {0xD6E1, 0xD6FB, prLVT}, // Lo [27] HANGUL SYLLABLE HWEOG..HANGUL SYLLABLE HWEOH {0xD6FC, 0xD6FC, prLV}, // Lo HANGUL SYLLABLE HWE {0xD6FD, 0xD717, prLVT}, // Lo [27] HANGUL SYLLABLE HWEG..HANGUL SYLLABLE HWEH {0xD718, 0xD718, prLV}, // Lo HANGUL SYLLABLE HWI {0xD719, 0xD733, prLVT}, // Lo [27] HANGUL SYLLABLE HWIG..HANGUL SYLLABLE HWIH {0xD734, 0xD734, prLV}, // Lo HANGUL SYLLABLE HYU {0xD735, 0xD74F, prLVT}, // Lo [27] HANGUL SYLLABLE HYUG..HANGUL SYLLABLE HYUH {0xD750, 0xD750, prLV}, // Lo HANGUL SYLLABLE HEU {0xD751, 0xD76B, prLVT}, // Lo [27] HANGUL SYLLABLE HEUG..HANGUL SYLLABLE HEUH {0xD76C, 0xD76C, prLV}, // Lo HANGUL SYLLABLE HYI {0xD76D, 0xD787, prLVT}, // Lo [27] HANGUL SYLLABLE HYIG..HANGUL SYLLABLE HYIH {0xD788, 0xD788, prLV}, // Lo HANGUL SYLLABLE HI {0xD789, 0xD7A3, prLVT}, // Lo [27] HANGUL SYLLABLE HIG..HANGUL SYLLABLE HIH {0xD7B0, 0xD7C6, prV}, // Lo [23] HANGUL JUNGSEONG O-YEO..HANGUL JUNGSEONG 
ARAEA-E {0xD7CB, 0xD7FB, prT}, // Lo [49] HANGUL JONGSEONG NIEUN-RIEUL..HANGUL JONGSEONG PHIEUPH-THIEUTH {0xFB1E, 0xFB1E, prExtend}, // Mn HEBREW POINT JUDEO-SPANISH VARIKA {0xFE00, 0xFE0F, prExtend}, // Mn [16] VARIATION SELECTOR-1..VARIATION SELECTOR-16 {0xFE20, 0xFE2F, prExtend}, // Mn [16] COMBINING LIGATURE LEFT HALF..COMBINING CYRILLIC TITLO RIGHT HALF {0xFEFF, 0xFEFF, prControl}, // Cf ZERO WIDTH NO-BREAK SPACE {0xFF9E, 0xFF9F, prExtend}, // Lm [2] HALFWIDTH KATAKANA VOICED SOUND MARK..HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK {0xFFF0, 0xFFF8, prControl}, // Cn [9] .. {0xFFF9, 0xFFFB, prControl}, // Cf [3] INTERLINEAR ANNOTATION ANCHOR..INTERLINEAR ANNOTATION TERMINATOR {0x101FD, 0x101FD, prExtend}, // Mn PHAISTOS DISC SIGN COMBINING OBLIQUE STROKE {0x102E0, 0x102E0, prExtend}, // Mn COPTIC EPACT THOUSANDS MARK {0x10376, 0x1037A, prExtend}, // Mn [5] COMBINING OLD PERMIC LETTER AN..COMBINING OLD PERMIC LETTER SII {0x10A01, 0x10A03, prExtend}, // Mn [3] KHAROSHTHI VOWEL SIGN I..KHAROSHTHI VOWEL SIGN VOCALIC R {0x10A05, 0x10A06, prExtend}, // Mn [2] KHAROSHTHI VOWEL SIGN E..KHAROSHTHI VOWEL SIGN O {0x10A0C, 0x10A0F, prExtend}, // Mn [4] KHAROSHTHI VOWEL LENGTH MARK..KHAROSHTHI SIGN VISARGA {0x10A38, 0x10A3A, prExtend}, // Mn [3] KHAROSHTHI SIGN BAR ABOVE..KHAROSHTHI SIGN DOT BELOW {0x10A3F, 0x10A3F, prExtend}, // Mn KHAROSHTHI VIRAMA {0x10AE5, 0x10AE6, prExtend}, // Mn [2] MANICHAEAN ABBREVIATION MARK ABOVE..MANICHAEAN ABBREVIATION MARK BELOW {0x10D24, 0x10D27, prExtend}, // Mn [4] HANIFI ROHINGYA SIGN HARBAHAY..HANIFI ROHINGYA SIGN TASSI {0x10F46, 0x10F50, prExtend}, // Mn [11] SOGDIAN COMBINING DOT BELOW..SOGDIAN COMBINING STROKE BELOW {0x11000, 0x11000, prSpacingMark}, // Mc BRAHMI SIGN CANDRABINDU {0x11001, 0x11001, prExtend}, // Mn BRAHMI SIGN ANUSVARA {0x11002, 0x11002, prSpacingMark}, // Mc BRAHMI SIGN VISARGA {0x11038, 0x11046, prExtend}, // Mn [15] BRAHMI VOWEL SIGN AA..BRAHMI VIRAMA {0x1107F, 0x11081, prExtend}, // Mn [3] BRAHMI NUMBER JOINER..KAITHI SIGN ANUSVARA {0x11082, 0x11082, prSpacingMark}, // Mc KAITHI SIGN VISARGA {0x110B0, 0x110B2, prSpacingMark}, // Mc [3] KAITHI VOWEL SIGN AA..KAITHI VOWEL SIGN II {0x110B3, 0x110B6, prExtend}, // Mn [4] KAITHI VOWEL SIGN U..KAITHI VOWEL SIGN AI {0x110B7, 0x110B8, prSpacingMark}, // Mc [2] KAITHI VOWEL SIGN O..KAITHI VOWEL SIGN AU {0x110B9, 0x110BA, prExtend}, // Mn [2] KAITHI SIGN VIRAMA..KAITHI SIGN NUKTA {0x110BD, 0x110BD, prPreprend}, // Cf KAITHI NUMBER SIGN {0x110CD, 0x110CD, prPreprend}, // Cf KAITHI NUMBER SIGN ABOVE {0x11100, 0x11102, prExtend}, // Mn [3] CHAKMA SIGN CANDRABINDU..CHAKMA SIGN VISARGA {0x11127, 0x1112B, prExtend}, // Mn [5] CHAKMA VOWEL SIGN A..CHAKMA VOWEL SIGN UU {0x1112C, 0x1112C, prSpacingMark}, // Mc CHAKMA VOWEL SIGN E {0x1112D, 0x11134, prExtend}, // Mn [8] CHAKMA VOWEL SIGN AI..CHAKMA MAAYYAA {0x11145, 0x11146, prSpacingMark}, // Mc [2] CHAKMA VOWEL SIGN AA..CHAKMA VOWEL SIGN EI {0x11173, 0x11173, prExtend}, // Mn MAHAJANI SIGN NUKTA {0x11180, 0x11181, prExtend}, // Mn [2] SHARADA SIGN CANDRABINDU..SHARADA SIGN ANUSVARA {0x11182, 0x11182, prSpacingMark}, // Mc SHARADA SIGN VISARGA {0x111B3, 0x111B5, prSpacingMark}, // Mc [3] SHARADA VOWEL SIGN AA..SHARADA VOWEL SIGN II {0x111B6, 0x111BE, prExtend}, // Mn [9] SHARADA VOWEL SIGN U..SHARADA VOWEL SIGN O {0x111BF, 0x111C0, prSpacingMark}, // Mc [2] SHARADA VOWEL SIGN AU..SHARADA SIGN VIRAMA {0x111C2, 0x111C3, prPreprend}, // Lo [2] SHARADA SIGN JIHVAMULIYA..SHARADA SIGN UPADHMANIYA {0x111C9, 0x111CC, prExtend}, // Mn [4] SHARADA SANDHI MARK..SHARADA 
EXTRA SHORT VOWEL MARK {0x1122C, 0x1122E, prSpacingMark}, // Mc [3] KHOJKI VOWEL SIGN AA..KHOJKI VOWEL SIGN II {0x1122F, 0x11231, prExtend}, // Mn [3] KHOJKI VOWEL SIGN U..KHOJKI VOWEL SIGN AI {0x11232, 0x11233, prSpacingMark}, // Mc [2] KHOJKI VOWEL SIGN O..KHOJKI VOWEL SIGN AU {0x11234, 0x11234, prExtend}, // Mn KHOJKI SIGN ANUSVARA {0x11235, 0x11235, prSpacingMark}, // Mc KHOJKI SIGN VIRAMA {0x11236, 0x11237, prExtend}, // Mn [2] KHOJKI SIGN NUKTA..KHOJKI SIGN SHADDA {0x1123E, 0x1123E, prExtend}, // Mn KHOJKI SIGN SUKUN {0x112DF, 0x112DF, prExtend}, // Mn KHUDAWADI SIGN ANUSVARA {0x112E0, 0x112E2, prSpacingMark}, // Mc [3] KHUDAWADI VOWEL SIGN AA..KHUDAWADI VOWEL SIGN II {0x112E3, 0x112EA, prExtend}, // Mn [8] KHUDAWADI VOWEL SIGN U..KHUDAWADI SIGN VIRAMA {0x11300, 0x11301, prExtend}, // Mn [2] GRANTHA SIGN COMBINING ANUSVARA ABOVE..GRANTHA SIGN CANDRABINDU {0x11302, 0x11303, prSpacingMark}, // Mc [2] GRANTHA SIGN ANUSVARA..GRANTHA SIGN VISARGA {0x1133B, 0x1133C, prExtend}, // Mn [2] COMBINING BINDU BELOW..GRANTHA SIGN NUKTA {0x1133E, 0x1133E, prExtend}, // Mc GRANTHA VOWEL SIGN AA {0x1133F, 0x1133F, prSpacingMark}, // Mc GRANTHA VOWEL SIGN I {0x11340, 0x11340, prExtend}, // Mn GRANTHA VOWEL SIGN II {0x11341, 0x11344, prSpacingMark}, // Mc [4] GRANTHA VOWEL SIGN U..GRANTHA VOWEL SIGN VOCALIC RR {0x11347, 0x11348, prSpacingMark}, // Mc [2] GRANTHA VOWEL SIGN EE..GRANTHA VOWEL SIGN AI {0x1134B, 0x1134D, prSpacingMark}, // Mc [3] GRANTHA VOWEL SIGN OO..GRANTHA SIGN VIRAMA {0x11357, 0x11357, prExtend}, // Mc GRANTHA AU LENGTH MARK {0x11362, 0x11363, prSpacingMark}, // Mc [2] GRANTHA VOWEL SIGN VOCALIC L..GRANTHA VOWEL SIGN VOCALIC LL {0x11366, 0x1136C, prExtend}, // Mn [7] COMBINING GRANTHA DIGIT ZERO..COMBINING GRANTHA DIGIT SIX {0x11370, 0x11374, prExtend}, // Mn [5] COMBINING GRANTHA LETTER A..COMBINING GRANTHA LETTER PA {0x11435, 0x11437, prSpacingMark}, // Mc [3] NEWA VOWEL SIGN AA..NEWA VOWEL SIGN II {0x11438, 0x1143F, prExtend}, // Mn [8] NEWA VOWEL SIGN U..NEWA VOWEL SIGN AI {0x11440, 0x11441, prSpacingMark}, // Mc [2] NEWA VOWEL SIGN O..NEWA VOWEL SIGN AU {0x11442, 0x11444, prExtend}, // Mn [3] NEWA SIGN VIRAMA..NEWA SIGN ANUSVARA {0x11445, 0x11445, prSpacingMark}, // Mc NEWA SIGN VISARGA {0x11446, 0x11446, prExtend}, // Mn NEWA SIGN NUKTA {0x1145E, 0x1145E, prExtend}, // Mn NEWA SANDHI MARK {0x114B0, 0x114B0, prExtend}, // Mc TIRHUTA VOWEL SIGN AA {0x114B1, 0x114B2, prSpacingMark}, // Mc [2] TIRHUTA VOWEL SIGN I..TIRHUTA VOWEL SIGN II {0x114B3, 0x114B8, prExtend}, // Mn [6] TIRHUTA VOWEL SIGN U..TIRHUTA VOWEL SIGN VOCALIC LL {0x114B9, 0x114B9, prSpacingMark}, // Mc TIRHUTA VOWEL SIGN E {0x114BA, 0x114BA, prExtend}, // Mn TIRHUTA VOWEL SIGN SHORT E {0x114BB, 0x114BC, prSpacingMark}, // Mc [2] TIRHUTA VOWEL SIGN AI..TIRHUTA VOWEL SIGN O {0x114BD, 0x114BD, prExtend}, // Mc TIRHUTA VOWEL SIGN SHORT O {0x114BE, 0x114BE, prSpacingMark}, // Mc TIRHUTA VOWEL SIGN AU {0x114BF, 0x114C0, prExtend}, // Mn [2] TIRHUTA SIGN CANDRABINDU..TIRHUTA SIGN ANUSVARA {0x114C1, 0x114C1, prSpacingMark}, // Mc TIRHUTA SIGN VISARGA {0x114C2, 0x114C3, prExtend}, // Mn [2] TIRHUTA SIGN VIRAMA..TIRHUTA SIGN NUKTA {0x115AF, 0x115AF, prExtend}, // Mc SIDDHAM VOWEL SIGN AA {0x115B0, 0x115B1, prSpacingMark}, // Mc [2] SIDDHAM VOWEL SIGN I..SIDDHAM VOWEL SIGN II {0x115B2, 0x115B5, prExtend}, // Mn [4] SIDDHAM VOWEL SIGN U..SIDDHAM VOWEL SIGN VOCALIC RR {0x115B8, 0x115BB, prSpacingMark}, // Mc [4] SIDDHAM VOWEL SIGN E..SIDDHAM VOWEL SIGN AU {0x115BC, 0x115BD, prExtend}, // Mn [2] SIDDHAM SIGN CANDRABINDU..SIDDHAM 
SIGN ANUSVARA {0x115BE, 0x115BE, prSpacingMark}, // Mc SIDDHAM SIGN VISARGA {0x115BF, 0x115C0, prExtend}, // Mn [2] SIDDHAM SIGN VIRAMA..SIDDHAM SIGN NUKTA {0x115DC, 0x115DD, prExtend}, // Mn [2] SIDDHAM VOWEL SIGN ALTERNATE U..SIDDHAM VOWEL SIGN ALTERNATE UU {0x11630, 0x11632, prSpacingMark}, // Mc [3] MODI VOWEL SIGN AA..MODI VOWEL SIGN II {0x11633, 0x1163A, prExtend}, // Mn [8] MODI VOWEL SIGN U..MODI VOWEL SIGN AI {0x1163B, 0x1163C, prSpacingMark}, // Mc [2] MODI VOWEL SIGN O..MODI VOWEL SIGN AU {0x1163D, 0x1163D, prExtend}, // Mn MODI SIGN ANUSVARA {0x1163E, 0x1163E, prSpacingMark}, // Mc MODI SIGN VISARGA {0x1163F, 0x11640, prExtend}, // Mn [2] MODI SIGN VIRAMA..MODI SIGN ARDHACANDRA {0x116AB, 0x116AB, prExtend}, // Mn TAKRI SIGN ANUSVARA {0x116AC, 0x116AC, prSpacingMark}, // Mc TAKRI SIGN VISARGA {0x116AD, 0x116AD, prExtend}, // Mn TAKRI VOWEL SIGN AA {0x116AE, 0x116AF, prSpacingMark}, // Mc [2] TAKRI VOWEL SIGN I..TAKRI VOWEL SIGN II {0x116B0, 0x116B5, prExtend}, // Mn [6] TAKRI VOWEL SIGN U..TAKRI VOWEL SIGN AU {0x116B6, 0x116B6, prSpacingMark}, // Mc TAKRI SIGN VIRAMA {0x116B7, 0x116B7, prExtend}, // Mn TAKRI SIGN NUKTA {0x1171D, 0x1171F, prExtend}, // Mn [3] AHOM CONSONANT SIGN MEDIAL LA..AHOM CONSONANT SIGN MEDIAL LIGATING RA {0x11720, 0x11721, prSpacingMark}, // Mc [2] AHOM VOWEL SIGN A..AHOM VOWEL SIGN AA {0x11722, 0x11725, prExtend}, // Mn [4] AHOM VOWEL SIGN I..AHOM VOWEL SIGN UU {0x11726, 0x11726, prSpacingMark}, // Mc AHOM VOWEL SIGN E {0x11727, 0x1172B, prExtend}, // Mn [5] AHOM VOWEL SIGN AW..AHOM SIGN KILLER {0x1182C, 0x1182E, prSpacingMark}, // Mc [3] DOGRA VOWEL SIGN AA..DOGRA VOWEL SIGN II {0x1182F, 0x11837, prExtend}, // Mn [9] DOGRA VOWEL SIGN U..DOGRA SIGN ANUSVARA {0x11838, 0x11838, prSpacingMark}, // Mc DOGRA SIGN VISARGA {0x11839, 0x1183A, prExtend}, // Mn [2] DOGRA SIGN VIRAMA..DOGRA SIGN NUKTA {0x119D1, 0x119D3, prSpacingMark}, // Mc [3] NANDINAGARI VOWEL SIGN AA..NANDINAGARI VOWEL SIGN II {0x119D4, 0x119D7, prExtend}, // Mn [4] NANDINAGARI VOWEL SIGN U..NANDINAGARI VOWEL SIGN VOCALIC RR {0x119DA, 0x119DB, prExtend}, // Mn [2] NANDINAGARI VOWEL SIGN E..NANDINAGARI VOWEL SIGN AI {0x119DC, 0x119DF, prSpacingMark}, // Mc [4] NANDINAGARI VOWEL SIGN O..NANDINAGARI SIGN VISARGA {0x119E0, 0x119E0, prExtend}, // Mn NANDINAGARI SIGN VIRAMA {0x119E4, 0x119E4, prSpacingMark}, // Mc NANDINAGARI VOWEL SIGN PRISHTHAMATRA E {0x11A01, 0x11A0A, prExtend}, // Mn [10] ZANABAZAR SQUARE VOWEL SIGN I..ZANABAZAR SQUARE VOWEL LENGTH MARK {0x11A33, 0x11A38, prExtend}, // Mn [6] ZANABAZAR SQUARE FINAL CONSONANT MARK..ZANABAZAR SQUARE SIGN ANUSVARA {0x11A39, 0x11A39, prSpacingMark}, // Mc ZANABAZAR SQUARE SIGN VISARGA {0x11A3A, 0x11A3A, prPreprend}, // Lo ZANABAZAR SQUARE CLUSTER-INITIAL LETTER RA {0x11A3B, 0x11A3E, prExtend}, // Mn [4] ZANABAZAR SQUARE CLUSTER-FINAL LETTER YA..ZANABAZAR SQUARE CLUSTER-FINAL LETTER VA {0x11A47, 0x11A47, prExtend}, // Mn ZANABAZAR SQUARE SUBJOINER {0x11A51, 0x11A56, prExtend}, // Mn [6] SOYOMBO VOWEL SIGN I..SOYOMBO VOWEL SIGN OE {0x11A57, 0x11A58, prSpacingMark}, // Mc [2] SOYOMBO VOWEL SIGN AI..SOYOMBO VOWEL SIGN AU {0x11A59, 0x11A5B, prExtend}, // Mn [3] SOYOMBO VOWEL SIGN VOCALIC R..SOYOMBO VOWEL LENGTH MARK {0x11A84, 0x11A89, prPreprend}, // Lo [6] SOYOMBO SIGN JIHVAMULIYA..SOYOMBO CLUSTER-INITIAL LETTER SA {0x11A8A, 0x11A96, prExtend}, // Mn [13] SOYOMBO FINAL CONSONANT SIGN G..SOYOMBO SIGN ANUSVARA {0x11A97, 0x11A97, prSpacingMark}, // Mc SOYOMBO SIGN VISARGA {0x11A98, 0x11A99, prExtend}, // Mn [2] SOYOMBO GEMINATION MARK..SOYOMBO SUBJOINER 
{0x11C2F, 0x11C2F, prSpacingMark}, // Mc BHAIKSUKI VOWEL SIGN AA {0x11C30, 0x11C36, prExtend}, // Mn [7] BHAIKSUKI VOWEL SIGN I..BHAIKSUKI VOWEL SIGN VOCALIC L {0x11C38, 0x11C3D, prExtend}, // Mn [6] BHAIKSUKI VOWEL SIGN E..BHAIKSUKI SIGN ANUSVARA {0x11C3E, 0x11C3E, prSpacingMark}, // Mc BHAIKSUKI SIGN VISARGA {0x11C3F, 0x11C3F, prExtend}, // Mn BHAIKSUKI SIGN VIRAMA {0x11C92, 0x11CA7, prExtend}, // Mn [22] MARCHEN SUBJOINED LETTER KA..MARCHEN SUBJOINED LETTER ZA {0x11CA9, 0x11CA9, prSpacingMark}, // Mc MARCHEN SUBJOINED LETTER YA {0x11CAA, 0x11CB0, prExtend}, // Mn [7] MARCHEN SUBJOINED LETTER RA..MARCHEN VOWEL SIGN AA {0x11CB1, 0x11CB1, prSpacingMark}, // Mc MARCHEN VOWEL SIGN I {0x11CB2, 0x11CB3, prExtend}, // Mn [2] MARCHEN VOWEL SIGN U..MARCHEN VOWEL SIGN E {0x11CB4, 0x11CB4, prSpacingMark}, // Mc MARCHEN VOWEL SIGN O {0x11CB5, 0x11CB6, prExtend}, // Mn [2] MARCHEN SIGN ANUSVARA..MARCHEN SIGN CANDRABINDU {0x11D31, 0x11D36, prExtend}, // Mn [6] MASARAM GONDI VOWEL SIGN AA..MASARAM GONDI VOWEL SIGN VOCALIC R {0x11D3A, 0x11D3A, prExtend}, // Mn MASARAM GONDI VOWEL SIGN E {0x11D3C, 0x11D3D, prExtend}, // Mn [2] MASARAM GONDI VOWEL SIGN AI..MASARAM GONDI VOWEL SIGN O {0x11D3F, 0x11D45, prExtend}, // Mn [7] MASARAM GONDI VOWEL SIGN AU..MASARAM GONDI VIRAMA {0x11D46, 0x11D46, prPreprend}, // Lo MASARAM GONDI REPHA {0x11D47, 0x11D47, prExtend}, // Mn MASARAM GONDI RA-KARA {0x11D8A, 0x11D8E, prSpacingMark}, // Mc [5] GUNJALA GONDI VOWEL SIGN AA..GUNJALA GONDI VOWEL SIGN UU {0x11D90, 0x11D91, prExtend}, // Mn [2] GUNJALA GONDI VOWEL SIGN EE..GUNJALA GONDI VOWEL SIGN AI {0x11D93, 0x11D94, prSpacingMark}, // Mc [2] GUNJALA GONDI VOWEL SIGN OO..GUNJALA GONDI VOWEL SIGN AU {0x11D95, 0x11D95, prExtend}, // Mn GUNJALA GONDI SIGN ANUSVARA {0x11D96, 0x11D96, prSpacingMark}, // Mc GUNJALA GONDI SIGN VISARGA {0x11D97, 0x11D97, prExtend}, // Mn GUNJALA GONDI VIRAMA {0x11EF3, 0x11EF4, prExtend}, // Mn [2] MAKASAR VOWEL SIGN I..MAKASAR VOWEL SIGN U {0x11EF5, 0x11EF6, prSpacingMark}, // Mc [2] MAKASAR VOWEL SIGN E..MAKASAR VOWEL SIGN O {0x13430, 0x13438, prControl}, // Cf [9] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END SEGMENT {0x16AF0, 0x16AF4, prExtend}, // Mn [5] BASSA VAH COMBINING HIGH TONE..BASSA VAH COMBINING HIGH-LOW TONE {0x16B30, 0x16B36, prExtend}, // Mn [7] PAHAWH HMONG MARK CIM TUB..PAHAWH HMONG MARK CIM TAUM {0x16F4F, 0x16F4F, prExtend}, // Mn MIAO SIGN CONSONANT MODIFIER BAR {0x16F51, 0x16F87, prSpacingMark}, // Mc [55] MIAO SIGN ASPIRATION..MIAO VOWEL SIGN UI {0x16F8F, 0x16F92, prExtend}, // Mn [4] MIAO TONE RIGHT..MIAO TONE BELOW {0x1BC9D, 0x1BC9E, prExtend}, // Mn [2] DUPLOYAN THICK LETTER SELECTOR..DUPLOYAN DOUBLE MARK {0x1BCA0, 0x1BCA3, prControl}, // Cf [4] SHORTHAND FORMAT LETTER OVERLAP..SHORTHAND FORMAT UP STEP {0x1D165, 0x1D165, prExtend}, // Mc MUSICAL SYMBOL COMBINING STEM {0x1D166, 0x1D166, prSpacingMark}, // Mc MUSICAL SYMBOL COMBINING SPRECHGESANG STEM {0x1D167, 0x1D169, prExtend}, // Mn [3] MUSICAL SYMBOL COMBINING TREMOLO-1..MUSICAL SYMBOL COMBINING TREMOLO-3 {0x1D16D, 0x1D16D, prSpacingMark}, // Mc MUSICAL SYMBOL COMBINING AUGMENTATION DOT {0x1D16E, 0x1D172, prExtend}, // Mc [5] MUSICAL SYMBOL COMBINING FLAG-1..MUSICAL SYMBOL COMBINING FLAG-5 {0x1D173, 0x1D17A, prControl}, // Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSICAL SYMBOL END PHRASE {0x1D17B, 0x1D182, prExtend}, // Mn [8] MUSICAL SYMBOL COMBINING ACCENT..MUSICAL SYMBOL COMBINING LOURE {0x1D185, 0x1D18B, prExtend}, // Mn [7] MUSICAL SYMBOL COMBINING DOIT..MUSICAL SYMBOL COMBINING TRIPLE TONGUE {0x1D1AA, 
0x1D1AD, prExtend}, // Mn [4] MUSICAL SYMBOL COMBINING DOWN BOW..MUSICAL SYMBOL COMBINING SNAP PIZZICATO {0x1D242, 0x1D244, prExtend}, // Mn [3] COMBINING GREEK MUSICAL TRISEME..COMBINING GREEK MUSICAL PENTASEME {0x1DA00, 0x1DA36, prExtend}, // Mn [55] SIGNWRITING HEAD RIM..SIGNWRITING AIR SUCKING IN {0x1DA3B, 0x1DA6C, prExtend}, // Mn [50] SIGNWRITING MOUTH CLOSED NEUTRAL..SIGNWRITING EXCITEMENT {0x1DA75, 0x1DA75, prExtend}, // Mn SIGNWRITING UPPER BODY TILTING FROM HIP JOINTS {0x1DA84, 0x1DA84, prExtend}, // Mn SIGNWRITING LOCATION HEAD NECK {0x1DA9B, 0x1DA9F, prExtend}, // Mn [5] SIGNWRITING FILL MODIFIER-2..SIGNWRITING FILL MODIFIER-6 {0x1DAA1, 0x1DAAF, prExtend}, // Mn [15] SIGNWRITING ROTATION MODIFIER-2..SIGNWRITING ROTATION MODIFIER-16 {0x1E000, 0x1E006, prExtend}, // Mn [7] COMBINING GLAGOLITIC LETTER AZU..COMBINING GLAGOLITIC LETTER ZHIVETE {0x1E008, 0x1E018, prExtend}, // Mn [17] COMBINING GLAGOLITIC LETTER ZEMLJA..COMBINING GLAGOLITIC LETTER HERU {0x1E01B, 0x1E021, prExtend}, // Mn [7] COMBINING GLAGOLITIC LETTER SHTA..COMBINING GLAGOLITIC LETTER YATI {0x1E023, 0x1E024, prExtend}, // Mn [2] COMBINING GLAGOLITIC LETTER YU..COMBINING GLAGOLITIC LETTER SMALL YUS {0x1E026, 0x1E02A, prExtend}, // Mn [5] COMBINING GLAGOLITIC LETTER YO..COMBINING GLAGOLITIC LETTER FITA {0x1E130, 0x1E136, prExtend}, // Mn [7] NYIAKENG PUACHUE HMONG TONE-B..NYIAKENG PUACHUE HMONG TONE-D {0x1E2EC, 0x1E2EF, prExtend}, // Mn [4] WANCHO TONE TUP..WANCHO TONE KOINI {0x1E8D0, 0x1E8D6, prExtend}, // Mn [7] MENDE KIKAKUI COMBINING NUMBER TEENS..MENDE KIKAKUI COMBINING NUMBER MILLIONS {0x1E944, 0x1E94A, prExtend}, // Mn [7] ADLAM ALIF LENGTHENER..ADLAM NUKTA {0x1F000, 0x1F02B, prExtendedPictographic}, // 5.1 [44] (🀀..🀫) MAHJONG TILE EAST WIND..MAHJONG TILE BACK {0x1F02C, 0x1F02F, prExtendedPictographic}, // NA [4] (🀬..🀯) .. {0x1F030, 0x1F093, prExtendedPictographic}, // 5.1[100] (🀰..🂓) DOMINO TILE HORIZONTAL BACK..DOMINO TILE VERTICAL-06-06 {0x1F094, 0x1F09F, prExtendedPictographic}, // NA [12] (🂔..🂟) .. {0x1F0A0, 0x1F0AE, prExtendedPictographic}, // 6.0 [15] (🂠..🂮) PLAYING CARD BACK..PLAYING CARD KING OF SPADES {0x1F0AF, 0x1F0B0, prExtendedPictographic}, // NA [2] (🂯..🂰) .. {0x1F0B1, 0x1F0BE, prExtendedPictographic}, // 6.0 [14] (🂱..🂾) PLAYING CARD ACE OF HEARTS..PLAYING CARD KING OF HEARTS {0x1F0BF, 0x1F0BF, prExtendedPictographic}, // 7.0 [1] (🂿) PLAYING CARD RED JOKER {0x1F0C0, 0x1F0C0, prExtendedPictographic}, // NA [1] (🃀) {0x1F0C1, 0x1F0CF, prExtendedPictographic}, // 6.0 [15] (🃁..🃏) PLAYING CARD ACE OF DIAMONDS..joker {0x1F0D0, 0x1F0D0, prExtendedPictographic}, // NA [1] (🃐) {0x1F0D1, 0x1F0DF, prExtendedPictographic}, // 6.0 [15] (🃑..🃟) PLAYING CARD ACE OF CLUBS..PLAYING CARD WHITE JOKER {0x1F0E0, 0x1F0F5, prExtendedPictographic}, // 7.0 [22] (🃠..🃵) PLAYING CARD FOOL..PLAYING CARD TRUMP-21 {0x1F0F6, 0x1F0FF, prExtendedPictographic}, // NA [10] (🃶..🃿) .. {0x1F10D, 0x1F10F, prExtendedPictographic}, // NA [3] (🄍..🄏) .. {0x1F12F, 0x1F12F, prExtendedPictographic}, // 11.0 [1] (🄯) COPYLEFT SYMBOL {0x1F16C, 0x1F16C, prExtendedPictographic}, // 12.0 [1] (🅬) RAISED MR SIGN {0x1F16D, 0x1F16F, prExtendedPictographic}, // NA [3] (🅭..🅯) .. 
{0x1F170, 0x1F171, prExtendedPictographic}, // 6.0 [2] (🅰️..🅱️) A button (blood type)..B button (blood type) {0x1F17E, 0x1F17E, prExtendedPictographic}, // 6.0 [1] (🅾️) O button (blood type) {0x1F17F, 0x1F17F, prExtendedPictographic}, // 5.2 [1] (🅿️) P button {0x1F18E, 0x1F18E, prExtendedPictographic}, // 6.0 [1] (🆎) AB button (blood type) {0x1F191, 0x1F19A, prExtendedPictographic}, // 6.0 [10] (🆑..🆚) CL button..VS button {0x1F1AD, 0x1F1E5, prExtendedPictographic}, // NA [57] (🆭..🇥) .. {0x1F1E6, 0x1F1FF, prRegionalIndicator}, // So [26] REGIONAL INDICATOR SYMBOL LETTER A..REGIONAL INDICATOR SYMBOL LETTER Z {0x1F201, 0x1F202, prExtendedPictographic}, // 6.0 [2] (🈁..🈂️) Japanese “here” button..Japanese “service charge” button {0x1F203, 0x1F20F, prExtendedPictographic}, // NA [13] (🈃..🈏) .. {0x1F21A, 0x1F21A, prExtendedPictographic}, // 5.2 [1] (🈚) Japanese “free of charge” button {0x1F22F, 0x1F22F, prExtendedPictographic}, // 5.2 [1] (🈯) Japanese “reserved” button {0x1F232, 0x1F23A, prExtendedPictographic}, // 6.0 [9] (🈲..🈺) Japanese “prohibited” button..Japanese “open for business” button {0x1F23C, 0x1F23F, prExtendedPictographic}, // NA [4] (🈼..🈿) .. {0x1F249, 0x1F24F, prExtendedPictographic}, // NA [7] (🉉..🉏) .. {0x1F250, 0x1F251, prExtendedPictographic}, // 6.0 [2] (🉐..🉑) Japanese “bargain” button..Japanese “acceptable” button {0x1F252, 0x1F25F, prExtendedPictographic}, // NA [14] (🉒..🉟) .. {0x1F260, 0x1F265, prExtendedPictographic}, // 10.0 [6] (🉠..🉥) ROUNDED SYMBOL FOR FU..ROUNDED SYMBOL FOR CAI {0x1F266, 0x1F2FF, prExtendedPictographic}, // NA[154] (🉦..🋿) .. {0x1F300, 0x1F320, prExtendedPictographic}, // 6.0 [33] (🌀..🌠) cyclone..shooting star {0x1F321, 0x1F32C, prExtendedPictographic}, // 7.0 [12] (🌡️..🌬️) thermometer..wind face {0x1F32D, 0x1F32F, prExtendedPictographic}, // 8.0 [3] (🌭..🌯) hot dog..burrito {0x1F330, 0x1F335, prExtendedPictographic}, // 6.0 [6] (🌰..🌵) chestnut..cactus {0x1F336, 0x1F336, prExtendedPictographic}, // 7.0 [1] (🌶️) hot pepper {0x1F337, 0x1F37C, prExtendedPictographic}, // 6.0 [70] (🌷..🍼) tulip..baby bottle {0x1F37D, 0x1F37D, prExtendedPictographic}, // 7.0 [1] (🍽️) fork and knife with plate {0x1F37E, 0x1F37F, prExtendedPictographic}, // 8.0 [2] (🍾..🍿) bottle with popping cork..popcorn {0x1F380, 0x1F393, prExtendedPictographic}, // 6.0 [20] (🎀..🎓) ribbon..graduation cap {0x1F394, 0x1F39F, prExtendedPictographic}, // 7.0 [12] (🎔..🎟️) HEART WITH TIP ON THE LEFT..admission tickets {0x1F3A0, 0x1F3C4, prExtendedPictographic}, // 6.0 [37] (🎠..🏄) carousel horse..person surfing {0x1F3C5, 0x1F3C5, prExtendedPictographic}, // 7.0 [1] (🏅) sports medal {0x1F3C6, 0x1F3CA, prExtendedPictographic}, // 6.0 [5] (🏆..🏊) trophy..person swimming {0x1F3CB, 0x1F3CE, prExtendedPictographic}, // 7.0 [4] (🏋️..🏎️) person lifting weights..racing car {0x1F3CF, 0x1F3D3, prExtendedPictographic}, // 8.0 [5] (🏏..🏓) cricket game..ping pong {0x1F3D4, 0x1F3DF, prExtendedPictographic}, // 7.0 [12] (🏔️..🏟️) snow-capped mountain..stadium {0x1F3E0, 0x1F3F0, prExtendedPictographic}, // 6.0 [17] (🏠..🏰) house..castle {0x1F3F1, 0x1F3F7, prExtendedPictographic}, // 7.0 [7] (🏱..🏷️) WHITE PENNANT..label {0x1F3F8, 0x1F3FA, prExtendedPictographic}, // 8.0 [3] (🏸..🏺) badminton..amphora {0x1F3FB, 0x1F3FF, prExtend}, // Sk [5] EMOJI MODIFIER FITZPATRICK TYPE-1-2..EMOJI MODIFIER FITZPATRICK TYPE-6 {0x1F400, 0x1F43E, prExtendedPictographic}, // 6.0 [63] (🐀..🐾) rat..paw prints {0x1F43F, 0x1F43F, prExtendedPictographic}, // 7.0 [1] (🐿️) chipmunk {0x1F440, 0x1F440, prExtendedPictographic}, // 6.0 [1] (👀) eyes 
{0x1F441, 0x1F441, prExtendedPictographic}, // 7.0 [1] (👁️) eye {0x1F442, 0x1F4F7, prExtendedPictographic}, // 6.0[182] (👂..📷) ear..camera {0x1F4F8, 0x1F4F8, prExtendedPictographic}, // 7.0 [1] (📸) camera with flash {0x1F4F9, 0x1F4FC, prExtendedPictographic}, // 6.0 [4] (📹..📼) video camera..videocassette {0x1F4FD, 0x1F4FE, prExtendedPictographic}, // 7.0 [2] (📽️..📾) film projector..PORTABLE STEREO {0x1F4FF, 0x1F4FF, prExtendedPictographic}, // 8.0 [1] (📿) prayer beads {0x1F500, 0x1F53D, prExtendedPictographic}, // 6.0 [62] (🔀..🔽) shuffle tracks button..downwards button {0x1F546, 0x1F54A, prExtendedPictographic}, // 7.0 [5] (🕆..🕊️) WHITE LATIN CROSS..dove {0x1F54B, 0x1F54F, prExtendedPictographic}, // 8.0 [5] (🕋..🕏) kaaba..BOWL OF HYGIEIA {0x1F550, 0x1F567, prExtendedPictographic}, // 6.0 [24] (🕐..🕧) one o’clock..twelve-thirty {0x1F568, 0x1F579, prExtendedPictographic}, // 7.0 [18] (🕨..🕹️) RIGHT SPEAKER..joystick {0x1F57A, 0x1F57A, prExtendedPictographic}, // 9.0 [1] (🕺) man dancing {0x1F57B, 0x1F5A3, prExtendedPictographic}, // 7.0 [41] (🕻..🖣) LEFT HAND TELEPHONE RECEIVER..BLACK DOWN POINTING BACKHAND INDEX {0x1F5A4, 0x1F5A4, prExtendedPictographic}, // 9.0 [1] (🖤) black heart {0x1F5A5, 0x1F5FA, prExtendedPictographic}, // 7.0 [86] (🖥️..🗺️) desktop computer..world map {0x1F5FB, 0x1F5FF, prExtendedPictographic}, // 6.0 [5] (🗻..🗿) mount fuji..moai {0x1F600, 0x1F600, prExtendedPictographic}, // 6.1 [1] (😀) grinning face {0x1F601, 0x1F610, prExtendedPictographic}, // 6.0 [16] (😁..😐) beaming face with smiling eyes..neutral face {0x1F611, 0x1F611, prExtendedPictographic}, // 6.1 [1] (😑) expressionless face {0x1F612, 0x1F614, prExtendedPictographic}, // 6.0 [3] (😒..😔) unamused face..pensive face {0x1F615, 0x1F615, prExtendedPictographic}, // 6.1 [1] (😕) confused face {0x1F616, 0x1F616, prExtendedPictographic}, // 6.0 [1] (😖) confounded face {0x1F617, 0x1F617, prExtendedPictographic}, // 6.1 [1] (😗) kissing face {0x1F618, 0x1F618, prExtendedPictographic}, // 6.0 [1] (😘) face blowing a kiss {0x1F619, 0x1F619, prExtendedPictographic}, // 6.1 [1] (😙) kissing face with smiling eyes {0x1F61A, 0x1F61A, prExtendedPictographic}, // 6.0 [1] (😚) kissing face with closed eyes {0x1F61B, 0x1F61B, prExtendedPictographic}, // 6.1 [1] (😛) face with tongue {0x1F61C, 0x1F61E, prExtendedPictographic}, // 6.0 [3] (😜..😞) winking face with tongue..disappointed face {0x1F61F, 0x1F61F, prExtendedPictographic}, // 6.1 [1] (😟) worried face {0x1F620, 0x1F625, prExtendedPictographic}, // 6.0 [6] (😠..😥) angry face..sad but relieved face {0x1F626, 0x1F627, prExtendedPictographic}, // 6.1 [2] (😦..😧) frowning face with open mouth..anguished face {0x1F628, 0x1F62B, prExtendedPictographic}, // 6.0 [4] (😨..😫) fearful face..tired face {0x1F62C, 0x1F62C, prExtendedPictographic}, // 6.1 [1] (😬) grimacing face {0x1F62D, 0x1F62D, prExtendedPictographic}, // 6.0 [1] (😭) loudly crying face {0x1F62E, 0x1F62F, prExtendedPictographic}, // 6.1 [2] (😮..😯) face with open mouth..hushed face {0x1F630, 0x1F633, prExtendedPictographic}, // 6.0 [4] (😰..😳) anxious face with sweat..flushed face {0x1F634, 0x1F634, prExtendedPictographic}, // 6.1 [1] (😴) sleeping face {0x1F635, 0x1F640, prExtendedPictographic}, // 6.0 [12] (😵..🙀) dizzy face..weary cat {0x1F641, 0x1F642, prExtendedPictographic}, // 7.0 [2] (🙁..🙂) slightly frowning face..slightly smiling face {0x1F643, 0x1F644, prExtendedPictographic}, // 8.0 [2] (🙃..🙄) upside-down face..face with rolling eyes {0x1F645, 0x1F64F, prExtendedPictographic}, // 6.0 [11] (🙅..🙏) person gesturing NO..folded hands 
{0x1F680, 0x1F6C5, prExtendedPictographic}, // 6.0 [70] (🚀..🛅) rocket..left luggage {0x1F6C6, 0x1F6CF, prExtendedPictographic}, // 7.0 [10] (🛆..🛏️) TRIANGLE WITH ROUNDED CORNERS..bed {0x1F6D0, 0x1F6D0, prExtendedPictographic}, // 8.0 [1] (🛐) place of worship {0x1F6D1, 0x1F6D2, prExtendedPictographic}, // 9.0 [2] (🛑..🛒) stop sign..shopping cart {0x1F6D3, 0x1F6D4, prExtendedPictographic}, // 10.0 [2] (🛓..🛔) STUPA..PAGODA {0x1F6D5, 0x1F6D5, prExtendedPictographic}, // 12.0 [1] (🛕) hindu temple {0x1F6D6, 0x1F6DF, prExtendedPictographic}, // NA [10] (🛖..🛟) .. {0x1F6E0, 0x1F6EC, prExtendedPictographic}, // 7.0 [13] (🛠️..🛬) hammer and wrench..airplane arrival {0x1F6ED, 0x1F6EF, prExtendedPictographic}, // NA [3] (🛭..🛯) .. {0x1F6F0, 0x1F6F3, prExtendedPictographic}, // 7.0 [4] (🛰️..🛳️) satellite..passenger ship {0x1F6F4, 0x1F6F6, prExtendedPictographic}, // 9.0 [3] (🛴..🛶) kick scooter..canoe {0x1F6F7, 0x1F6F8, prExtendedPictographic}, // 10.0 [2] (🛷..🛸) sled..flying saucer {0x1F6F9, 0x1F6F9, prExtendedPictographic}, // 11.0 [1] (🛹) skateboard {0x1F6FA, 0x1F6FA, prExtendedPictographic}, // 12.0 [1] (🛺) auto rickshaw {0x1F6FB, 0x1F6FF, prExtendedPictographic}, // NA [5] (🛻..🛿) .. {0x1F774, 0x1F77F, prExtendedPictographic}, // NA [12] (🝴..🝿) .. {0x1F7D5, 0x1F7D8, prExtendedPictographic}, // 11.0 [4] (🟕..🟘) CIRCLED TRIANGLE..NEGATIVE CIRCLED SQUARE {0x1F7D9, 0x1F7DF, prExtendedPictographic}, // NA [7] (🟙..🟟) .. {0x1F7E0, 0x1F7EB, prExtendedPictographic}, // 12.0 [12] (🟠..🟫) orange circle..brown square {0x1F7EC, 0x1F7FF, prExtendedPictographic}, // NA [20] (🟬..🟿) .. {0x1F80C, 0x1F80F, prExtendedPictographic}, // NA [4] (🠌..🠏) .. {0x1F848, 0x1F84F, prExtendedPictographic}, // NA [8] (🡈..🡏) .. {0x1F85A, 0x1F85F, prExtendedPictographic}, // NA [6] (🡚..🡟) .. {0x1F888, 0x1F88F, prExtendedPictographic}, // NA [8] (🢈..🢏) .. {0x1F8AE, 0x1F8FF, prExtendedPictographic}, // NA [82] (🢮..🣿) .. 
{0x1F90C, 0x1F90C, prExtendedPictographic}, // NA [1] (🤌) {0x1F90D, 0x1F90F, prExtendedPictographic}, // 12.0 [3] (🤍..🤏) white heart..pinching hand {0x1F910, 0x1F918, prExtendedPictographic}, // 8.0 [9] (🤐..🤘) zipper-mouth face..sign of the horns {0x1F919, 0x1F91E, prExtendedPictographic}, // 9.0 [6] (🤙..🤞) call me hand..crossed fingers {0x1F91F, 0x1F91F, prExtendedPictographic}, // 10.0 [1] (🤟) love-you gesture {0x1F920, 0x1F927, prExtendedPictographic}, // 9.0 [8] (🤠..🤧) cowboy hat face..sneezing face {0x1F928, 0x1F92F, prExtendedPictographic}, // 10.0 [8] (🤨..🤯) face with raised eyebrow..exploding head {0x1F930, 0x1F930, prExtendedPictographic}, // 9.0 [1] (🤰) pregnant woman {0x1F931, 0x1F932, prExtendedPictographic}, // 10.0 [2] (🤱..🤲) breast-feeding..palms up together {0x1F933, 0x1F93A, prExtendedPictographic}, // 9.0 [8] (🤳..🤺) selfie..person fencing {0x1F93C, 0x1F93E, prExtendedPictographic}, // 9.0 [3] (🤼..🤾) people wrestling..person playing handball {0x1F93F, 0x1F93F, prExtendedPictographic}, // 12.0 [1] (🤿) diving mask {0x1F940, 0x1F945, prExtendedPictographic}, // 9.0 [6] (🥀..🥅) wilted flower..goal net {0x1F947, 0x1F94B, prExtendedPictographic}, // 9.0 [5] (🥇..🥋) 1st place medal..martial arts uniform {0x1F94C, 0x1F94C, prExtendedPictographic}, // 10.0 [1] (🥌) curling stone {0x1F94D, 0x1F94F, prExtendedPictographic}, // 11.0 [3] (🥍..🥏) lacrosse..flying disc {0x1F950, 0x1F95E, prExtendedPictographic}, // 9.0 [15] (🥐..🥞) croissant..pancakes {0x1F95F, 0x1F96B, prExtendedPictographic}, // 10.0 [13] (🥟..🥫) dumpling..canned food {0x1F96C, 0x1F970, prExtendedPictographic}, // 11.0 [5] (🥬..🥰) leafy green..smiling face with hearts {0x1F971, 0x1F971, prExtendedPictographic}, // 12.0 [1] (🥱) yawning face {0x1F972, 0x1F972, prExtendedPictographic}, // NA [1] (🥲) {0x1F973, 0x1F976, prExtendedPictographic}, // 11.0 [4] (🥳..🥶) partying face..cold face {0x1F977, 0x1F979, prExtendedPictographic}, // NA [3] (🥷..🥹) .. {0x1F97A, 0x1F97A, prExtendedPictographic}, // 11.0 [1] (🥺) pleading face {0x1F97B, 0x1F97B, prExtendedPictographic}, // 12.0 [1] (🥻) sari {0x1F97C, 0x1F97F, prExtendedPictographic}, // 11.0 [4] (🥼..🥿) lab coat..flat shoe {0x1F980, 0x1F984, prExtendedPictographic}, // 8.0 [5] (🦀..🦄) crab..unicorn {0x1F985, 0x1F991, prExtendedPictographic}, // 9.0 [13] (🦅..🦑) eagle..squid {0x1F992, 0x1F997, prExtendedPictographic}, // 10.0 [6] (🦒..🦗) giraffe..cricket {0x1F998, 0x1F9A2, prExtendedPictographic}, // 11.0 [11] (🦘..🦢) kangaroo..swan {0x1F9A3, 0x1F9A4, prExtendedPictographic}, // NA [2] (🦣..🦤) .. {0x1F9A5, 0x1F9AA, prExtendedPictographic}, // 12.0 [6] (🦥..🦪) sloth..oyster {0x1F9AB, 0x1F9AD, prExtendedPictographic}, // NA [3] (🦫..🦭) .. {0x1F9AE, 0x1F9AF, prExtendedPictographic}, // 12.0 [2] (🦮..🦯) guide dog..probing cane {0x1F9B0, 0x1F9B9, prExtendedPictographic}, // 11.0 [10] (🦰..🦹) red hair..supervillain {0x1F9BA, 0x1F9BF, prExtendedPictographic}, // 12.0 [6] (🦺..🦿) safety vest..mechanical leg {0x1F9C0, 0x1F9C0, prExtendedPictographic}, // 8.0 [1] (🧀) cheese wedge {0x1F9C1, 0x1F9C2, prExtendedPictographic}, // 11.0 [2] (🧁..🧂) cupcake..salt {0x1F9C3, 0x1F9CA, prExtendedPictographic}, // 12.0 [8] (🧃..🧊) beverage box..ice cube {0x1F9CB, 0x1F9CC, prExtendedPictographic}, // NA [2] (🧋..🧌) .. 
{0x1F9CD, 0x1F9CF, prExtendedPictographic}, // 12.0 [3] (🧍..🧏) person standing..deaf person {0x1F9D0, 0x1F9E6, prExtendedPictographic}, // 10.0 [23] (🧐..🧦) face with monocle..socks {0x1F9E7, 0x1F9FF, prExtendedPictographic}, // 11.0 [25] (🧧..🧿) red envelope..nazar amulet {0x1FA00, 0x1FA53, prExtendedPictographic}, // 12.0 [84] (🨀..🩓) NEUTRAL CHESS KING..BLACK CHESS KNIGHT-BISHOP {0x1FA54, 0x1FA5F, prExtendedPictographic}, // NA [12] (🩔..🩟) .. {0x1FA60, 0x1FA6D, prExtendedPictographic}, // 11.0 [14] (🩠..🩭) XIANGQI RED GENERAL..XIANGQI BLACK SOLDIER {0x1FA6E, 0x1FA6F, prExtendedPictographic}, // NA [2] (🩮..🩯) .. {0x1FA70, 0x1FA73, prExtendedPictographic}, // 12.0 [4] (🩰..🩳) ballet shoes..shorts {0x1FA74, 0x1FA77, prExtendedPictographic}, // NA [4] (🩴..🩷) .. {0x1FA78, 0x1FA7A, prExtendedPictographic}, // 12.0 [3] (🩸..🩺) drop of blood..stethoscope {0x1FA7B, 0x1FA7F, prExtendedPictographic}, // NA [5] (🩻..🩿) .. {0x1FA80, 0x1FA82, prExtendedPictographic}, // 12.0 [3] (🪀..🪂) yo-yo..parachute {0x1FA83, 0x1FA8F, prExtendedPictographic}, // NA [13] (🪃..🪏) .. {0x1FA90, 0x1FA95, prExtendedPictographic}, // 12.0 [6] (🪐..🪕) ringed planet..banjo {0x1FA96, 0x1FFFD, prExtendedPictographic}, // NA[1384] (🪖..🿽) .. {0xE0000, 0xE0000, prControl}, // Cn {0xE0001, 0xE0001, prControl}, // Cf LANGUAGE TAG {0xE0002, 0xE001F, prControl}, // Cn [30] .. {0xE0020, 0xE007F, prExtend}, // Cf [96] TAG SPACE..CANCEL TAG {0xE0080, 0xE00FF, prControl}, // Cn [128] .. {0xE0100, 0xE01EF, prExtend}, // Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256 {0xE01F0, 0xE0FFF, prControl}, // Cn [3600] .. } // property returns the Unicode property value (see constants above) of the // given code point. func property(r rune) int { // Run a binary search. from := 0 to := len(codePoints) for to > from { middle := (from + to) / 2 cpRange := codePoints[middle] if int(r) < cpRange[0] { to = middle continue } if int(r) > cpRange[1] { from = middle + 1 continue } return cpRange[2] } return prAny } dependencies/pkg/mod/github.com/rivo/uniseg@v0.2.0/LICENSE.txt0000644000000000000000000000206015024302472022536 0ustar rootrootMIT License Copyright (c) 2019 Oliver Kuederle Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
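The table above maps code point ranges to grapheme-break properties, and the property() function at the end of properties.go resolves a rune by binary-searching those sorted, non-overlapping ranges. Below is a minimal, self-contained sketch of that lookup technique; the three sample ranges, the sampleRanges/lookup names, and the cut-down property constants are illustrative stand-ins for the full generated table, not library code.

package main

import "fmt"

// A tiny subset of property constants, mirroring the style of the library's
// prXxx values; the numeric values here are only for this sketch.
const (
	prAny = iota
	prExtend
	prRegionalIndicator
	prExtendedPictographic
)

// Each entry is {first code point, last code point, property}, sorted by the
// start of the range, with no overlaps — the same invariant the real table has.
var sampleRanges = [][3]int{
	{0x1F1E6, 0x1F1FF, prRegionalIndicator},    // regional indicator symbols
	{0x1F3FB, 0x1F3FF, prExtend},               // emoji skin-tone modifiers
	{0x1F600, 0x1F64F, prExtendedPictographic}, // emoticons block
}

// lookup returns the property of r, or prAny if r falls in no range.
func lookup(r rune) int {
	from, to := 0, len(sampleRanges)
	for to > from {
		middle := (from + to) / 2
		cpRange := sampleRanges[middle]
		if int(r) < cpRange[0] {
			to = middle // r is before this range; search the lower half
			continue
		}
		if int(r) > cpRange[1] {
			from = middle + 1 // r is past this range; search the upper half
			continue
		}
		return cpRange[2] // r is inside this range
	}
	return prAny
}

func main() {
	fmt.Println(lookup('😀') == prExtendedPictographic) // true: U+1F600 is in the emoticons range
	fmt.Println(lookup('A') == prAny)                   // true: U+0041 matches no range
}

Because the ranges are sorted and disjoint, each probe discards half of the remaining table, so a lookup costs O(log n) comparisons regardless of how large the generated table grows.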
dependencies/pkg/mod/github.com/vbauerster/0000775000000000000000000000000015024302472017764 5ustar rootrootdependencies/pkg/mod/github.com/vbauerster/mpb/0000775000000000000000000000000015024302472020542 5ustar rootrootdependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/0000755000000000000000000000000015024302472021747 5ustar rootrootdependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/_svg/0000755000000000000000000000000015024302472022705 5ustar rootrootdependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/_svg/hIpTa3A5rQz65ssiVuRJu87X6.svg0000644000000000000000000052427415024302472027642 0ustar rootroot[SVG terminal recording: `go run -race main.go` executed in ~/go/src/github.com/vbauerster/mpb/examples/io/single; a single mpb progress bar advances from 0 to 40.6MiB with an ETA countdown and a MiB/s speed readout. The frame-by-frame animation text is omitted here.]
===================>-----------------------------------|00:20]8.45MiB/s16.0MiB/40.6MiB[======================>-----------------------------------|00:20]8.27MiB/s16.1MiB/40.6MiB[======================>-----------------------------------|00:22]8.05MiB/s16.3MiB/40.6MiB[======================>-----------------------------------|00:21]7.88MiB/s16.4MiB/40.6MiB[======================>-----------------------------------|00:21]7.69MiB/s16.6MiB/40.6MiB[=======================>----------------------------------|00:21]7.60MiB/s16.7MiB/40.6MiB[=======================>----------------------------------|00:22]7.49MiB/s16.9MiB/40.6MiB[=======================>----------------------------------|00:22]7.41MiB/s17.0MiB/40.6MiB[=======================>----------------------------------|00:21]7.43MiB/s17.2MiB/40.6MiB[========================>---------------------------------|00:21]7.38MiB/s17.4MiB/40.6MiB[========================>---------------------------------|00:23]7.23MiB/s17.5MiB/40.6MiB[========================>---------------------------------|00:23]7.15MiB/s17.7MiB/40.6MiB[========================>---------------------------------|00:24]6.99MiB/s17.9MiB/40.6MiB[=========================>--------------------------------|00:25]6.90MiB/s18.0MiB/40.6MiB[=========================>--------------------------------|00:24]6.83MiB/s18.2MiB/40.6MiB[=========================>--------------------------------|00:23]8.15MiB/s18.3MiB/40.6MiB[=========================>--------------------------------|00:24]7.99MiB/s18.4MiB/40.6MiB[=========================>--------------------------------|00:24]7.88MiB/s18.6MiB/40.6MiB[==========================>-------------------------------|00:24]7.62MiB/s18.7MiB/40.6MiB[==========================>-------------------------------|00:23]7.58MiB/s18.8MiB/40.6MiB[==========================>-------------------------------|00:23]7.47MiB/s19.0MiB/40.6MiB[==========================>-------------------------------|00:22]7.39MiB/s19.1MiB/40.6MiB[==========================>-------------------------------|00:22]7.33MiB/s19.2MiB/40.6MiB[===========================>------------------------------|00:22]8.48MiB/s19.4MiB/40.6MiB[===========================>------------------------------|00:25]8.27MiB/s19.5MiB/40.6MiB[===========================>------------------------------|00:25]8.13MiB/s19.6MiB/40.6MiB[===========================>------------------------------|00:25]7.89MiB/s19.7MiB/40.6MiB[===========================>------------------------------|00:25]7.67MiB/s19.8MiB/40.6MiB[===========================>------------------------------|00:24]7.52MiB/s20.0MiB/40.6MiB[============================>-----------------------------|00:24]7.42MiB/s20.1MiB/40.6MiB[============================>-----------------------------|00:24]7.25MiB/s20.2MiB/40.6MiB[============================>-----------------------------|00:23]7.12MiB/s20.3MiB/40.6MiB[============================>-----------------------------|00:23]6.94MiB/s20.5MiB/40.6MiB[============================>-----------------------------|00:24]6.75MiB/s20.6MiB/40.6MiB[============================>-----------------------------|00:24]6.59MiB/s20.7MiB/40.6MiB[=============================>----------------------------|00:27]6.57MiB/s20.8MiB/40.6MiB[=============================>----------------------------|00:27]6.45MiB/s21.0MiB/40.6MiB[=============================>----------------------------|00:28]6.40MiB/s21.1MiB/40.6MiB[=============================>----------------------------|00:30]6.36MiB/s21.2MiB/40.6MiB[=============================>------------------
----------|00:29]6.28MiB/s21.3MiB/40.6MiB[=============================>----------------------------|00:28]6.22MiB/s21.4MiB/40.6MiB[==============================>---------------------------|00:27]6.19MiB/s21.5MiB/40.6MiB[==============================>---------------------------|00:29]6.10MiB/s21.7MiB/40.6MiB[==============================>---------------------------|00:28]6.09MiB/s21.8MiB/40.6MiB[==============================>---------------------------|00:27]6.04MiB/s21.9MiB/40.6MiB[==============================>---------------------------|00:27]5.88MiB/s22.0MiB/40.6MiB[===============================>--------------------------|00:27]5.83MiB/s22.2MiB/40.6MiB[===============================>--------------------------|00:26]5.78MiB/s22.3MiB/40.6MiB[===============================>--------------------------|00:25]5.80MiB/s22.4MiB/40.6MiB[===============================>--------------------------|00:24]5.76MiB/s22.5MiB/40.6MiB[===============================>--------------------------|00:24]5.72MiB/s22.7MiB/40.6MiB[===============================>--------------------------|00:23]5.70MiB/s22.8MiB/40.6MiB[================================>-------------------------|00:24]5.60MiB/s22.8MiB/40.6MiB[================================>-------------------------|00:24]5.58MiB/s23.0MiB/40.6MiB[================================>-------------------------|00:25]6.35MiB/s23.1MiB/40.6MiB[================================>-------------------------|00:25]6.33MiB/s23.2MiB/40.6MiB[================================>-------------------------|00:24]6.33MiB/s23.4MiB/40.6MiB[================================>-------------------------|00:24]6.31MiB/s23.5MiB/40.6MiB[=================================>------------------------|00:23]6.28MiB/s23.6MiB/40.6MiB[=================================>------------------------|00:23]6.20MiB/s23.7MiB/40.6MiB[=================================>------------------------|00:22]6.26MiB/s23.8MiB/40.6MiB[=================================>------------------------|00:22]6.26MiB/s23.9MiB/40.6MiB[=================================>------------------------|00:21]6.31MiB/s24.0MiB/40.6MiB[=================================>------------------------|00:21]6.26MiB/s24.2MiB/40.6MiB[==================================>-----------------------|00:22]6.25MiB/s24.3MiB/40.6MiB[==================================>-----------------------|00:25]6.20MiB/s24.4MiB/40.6MiB[==================================>-----------------------|00:24]6.20MiB/s24.5MiB/40.6MiB[==================================>-----------------------|00:23]6.14MiB/s24.7MiB/40.6MiB[==================================>-----------------------|00:24]6.16MiB/s24.8MiB/40.6MiB[==================================>-----------------------|00:24]6.13MiB/s24.9MiB/40.6MiB[===================================>----------------------|00:23]6.14MiB/s25.0MiB/40.6MiB[===================================>----------------------|00:23]6.17MiB/s25.1MiB/40.6MiB[===================================>----------------------|00:23]6.16MiB/s25.3MiB/40.6MiB[===================================>----------------------|00:22]6.05MiB/s25.4MiB/40.6MiB[===================================>----------------------|00:22]6.14MiB/s25.5MiB/40.6MiB[===================================>----------------------|00:21]6.06MiB/s25.6MiB/40.6MiB[====================================>---------------------|00:22]6.04MiB/s25.8MiB/40.6MiB[====================================>---------------------|00:21]6.02MiB/s25.9MiB/40.6MiB[====================================>---------------------|00:22]5.96MiB/s26.0MiB/40.6MiB[===
=================================>---------------------|00:24]6.05MiB/s26.1MiB/40.6MiB[====================================>---------------------|00:23]6.03MiB/s26.2MiB/40.6MiB[=====================================>--------------------|00:22]7.18MiB/s26.3MiB/40.6MiB[=====================================>--------------------|00:22]7.10MiB/s26.4MiB/40.6MiB[=====================================>--------------------|00:21]7.09MiB/s26.5MiB/40.6MiB[=====================================>--------------------|00:21]7.09MiB/s26.7MiB/40.6MiB[=====================================>--------------------|00:21]7.07MiB/s26.8MiB/40.6MiB[=====================================>--------------------|00:20]7.02MiB/s26.8MiB/40.6MiB[=====================================>--------------------|00:20]6.92MiB/s27.0MiB/40.6MiB[======================================>-------------------|00:19]6.89MiB/s27.1MiB/40.6MiB[======================================>-------------------|00:19]6.99MiB/s27.2MiB/40.6MiB[======================================>-------------------|00:18]6.98MiB/s27.3MiB/40.6MiB[======================================>-------------------|00:18]6.99MiB/s27.5MiB/40.6MiB[======================================>-------------------|00:20]6.98MiB/s27.5MiB/40.6MiB[======================================>-------------------|00:20]6.94MiB/s27.7MiB/40.6MiB[=======================================>------------------|00:20]7.98MiB/s27.8MiB/40.6MiB[=======================================>------------------|00:19]7.88MiB/s27.8MiB/40.6MiB[=======================================>------------------|00:19]7.81MiB/s28.0MiB/40.6MiB[=======================================>------------------|00:18]7.71MiB/s28.0MiB/40.6MiB[=======================================>------------------|00:18]7.58MiB/s28.1MiB/40.6MiB[=======================================>------------------|00:18]7.44MiB/s28.2MiB/40.6MiB[=======================================>------------------|00:18]7.38MiB/s28.3MiB/40.6MiB[========================================>-----------------|00:17]7.33MiB/s28.4MiB/40.6MiB[========================================>-----------------|00:17]7.19MiB/s28.5MiB/40.6MiB[========================================>-----------------|00:18]7.17MiB/s28.6MiB/40.6MiB[========================================>-----------------|00:17]7.17MiB/s28.7MiB/40.6MiB[========================================>-----------------|00:20]7.11MiB/s28.8MiB/40.6MiB[========================================>-----------------|00:19]7.03MiB/s28.9MiB/40.6MiB[========================================>-----------------|00:20]6.89MiB/s29.0MiB/40.6MiB[=========================================>----------------|00:20]6.79MiB/s29.1MiB/40.6MiB[=========================================>----------------|00:19]6.72MiB/s29.2MiB/40.6MiB[=========================================>----------------|00:21]6.60MiB/s29.4MiB/40.6MiB[=========================================>----------------|00:20]6.50MiB/s29.4MiB/40.6MiB[=========================================>----------------|00:20]6.48MiB/s29.5MiB/40.6MiB[=========================================>----------------|00:20]6.44MiB/s29.7MiB/40.6MiB[=========================================>----------------|00:19]6.37MiB/s29.7MiB/40.6MiB[==========================================>---------------|00:19]6.31MiB/s29.8MiB/40.6MiB[==========================================>---------------|00:21]6.31MiB/s29.9MiB/40.6MiB[==========================================>---------------|00:20]6.34MiB/s30.0MiB/40.6MiB[==========================================>-----
----------|00:20]6.32MiB/s30.2MiB/40.6MiB[==========================================>---------------|00:19]6.22MiB/s30.2MiB/40.6MiB[==========================================>---------------|00:19]6.18MiB/s30.3MiB/40.6MiB[==========================================>---------------|00:19]6.16MiB/s30.5MiB/40.6MiB[===========================================>--------------|00:19]6.21MiB/s30.6MiB/40.6MiB[===========================================>--------------|00:18]6.19MiB/s30.7MiB/40.6MiB[===========================================>--------------|00:17]6.18MiB/s30.8MiB/40.6MiB[===========================================>--------------|00:17]6.19MiB/s30.9MiB/40.6MiB[===========================================>--------------|00:17]5.99MiB/s31.0MiB/40.6MiB[===========================================>--------------|00:17]5.89MiB/s31.1MiB/40.6MiB[============================================>-------------|00:16]5.92MiB/s31.3MiB/40.6MiB[============================================>-------------|00:15]5.80MiB/s31.4MiB/40.6MiB[============================================>-------------|00:15]5.85MiB/s31.5MiB/40.6MiB[============================================>-------------|00:14]5.87MiB/s31.7MiB/40.6MiB[============================================>-------------|00:13]5.93MiB/s31.9MiB/40.6MiB[=============================================>------------|00:13]5.95MiB/s32.0MiB/40.6MiB[=============================================>------------|00:12]5.98MiB/s32.2MiB/40.6MiB[=============================================>------------|00:11]5.99MiB/s32.4MiB/40.6MiB[=============================================>------------|00:11]6.04MiB/s32.5MiB/40.6MiB[=============================================>------------|00:10]6.08MiB/s32.7MiB/40.6MiB[==============================================>-----------|00:10]6.03MiB/s32.9MiB/40.6MiB[==============================================>-----------|00:09]6.05MiB/s33.1MiB/40.6MiB[==============================================>-----------|00:09]6.08MiB/s33.3MiB/40.6MiB[===============================================>----------|00:08]6.13MiB/s33.6MiB/40.6MiB[===============================================>----------|00:07]6.30MiB/s33.8MiB/40.6MiB[===============================================>----------|00:07]6.33MiB/s34.0MiB/40.6MiB[================================================>---------|00:07]6.44MiB/s34.3MiB/40.6MiB[================================================>---------|00:06]6.88MiB/s34.6MiB/40.6MiB[================================================>---------|00:05]6.88MiB/s34.8MiB/40.6MiB[=================================================>--------|00:05]6.88MiB/s35.2MiB/40.6MiB[=================================================>--------|00:04]10.50MiB/s35.4MiB/40.6MiB[==================================================>-------|00:04]10.17MiB/s35.6MiB/40.6MiB[==================================================>-------|00:03]9.88MiB/s35.9MiB/40.6MiB[==================================================>-------|00:03]9.36MiB/s36.2MiB/40.6MiB[===================================================>------|00:03]9.27MiB/s36.4MiB/40.6MiB[===================================================>------|00:02]9.08MiB/s36.7MiB/40.6MiB[===================================================>------|00:02]8.83MiB/s37.0MiB/40.6MiB[====================================================>-----|00:02]8.64MiB/s37.2MiB/40.6MiB[====================================================>-----|00:02]8.48MiB/s37.5MiB/40.6MiB[=====================================================>----|00:02]8.26MiB/s37.9MiB/40.6MiB[=
====================================================>----|00:02]8.16MiB/s38.1MiB/40.6MiB[=====================================================>----|00:01]8.21MiB/s38.4MiB/40.6MiB[======================================================>---|00:01]8.38MiB/s38.7MiB/40.6MiB[======================================================>---|00:01]8.31MiB/s39.0MiB/40.6MiB[=======================================================>--|00:01]8.40MiB/s39.3MiB/40.6MiB[=======================================================>--|00:01]8.21MiB/s39.6MiB/40.6MiB[========================================================>-|00:00]8.14MiB/s40.0MiB/40.6MiB[========================================================>-|00:00]7.97MiB/s40.2MiB/40.6MiB[==========================================================|00:00]7.75MiB/sdependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/_svg/wHzf1M7sd7B3zVa2scBMnjqRf.svg0000644000000000000000000057565515024302472027774 0ustar rootroot~/go/src/github.com/vbauerster/mpb/examples/complex~/go/src/github.com/vbauerster/mpb/examples/complexmaster*gorun-racemain.gogorun-racemain.gogorun-racemain.goTask#03:installing00:08[======>-------------------------------------------------------]11%Task#03:installing00:07[==============>-----------------------------------------------]24%Task#02:done!Task#03:done!Task#01:done!Task#00:done!~/go/src/github.com/vbauerster/mpb/examples/complexmaster*19s~/go/src/github.com/vbauerster/mpb/examples/complexmastergorun-racemain.gogorun-racemain.gogorun-racemain.gogorun-racemain.goTask#00:downloading4/268[>-------------------------------------------------------------]1%Task#01:downloading2/274[--------------------------------------------------------------]1%Task#02:downloading3/114[=>------------------------------------------------------------]3%Task#03:downloading4/114[=>------------------------------------------------------------]4%Task#00:downloading6/268[>-------------------------------------------------------------]2%Task#01:downloading8/274[=>------------------------------------------------------------]3%Task#02:downloading9/114[====>---------------------------------------------------------]8%Task#03:downloading12/114[======>-------------------------------------------------------]11%Task#00:downloading9/268[=>------------------------------------------------------------]3%Task#01:downloading12/274[==>-----------------------------------------------------------]4%Task#02:downloading12/114[======>-------------------------------------------------------]11%Task#03:downloading24/114[============>-------------------------------------------------]21%Task#00:downloading11/268[==>-----------------------------------------------------------]4%Task#01:downloading16/274[===>----------------------------------------------------------]6%Task#02:downloading18/114[=========>----------------------------------------------------]16%Task#03:downloading36/114[===================>------------------------------------------]32%Task#00:downloading13/268[==>-----------------------------------------------------------]5%Task#01:downloading22/274[====>---------------------------------------------------------]8%Task#02:downloading27/114[==============>-----------------------------------------------]24%Task#03:downloading40/114[=====================>----------------------------------------]35%Task#00:downloading15/268[==>-----------------------------------------------------------]6%Task#01:downloading24/274[====>---------------------------------------------------------]9%Task#02:downloading30/114[=
==============>----------------------------------------------]26%Task#03:downloading48/114[=========================>------------------------------------]42%Task#00:downloading16/268[===>----------------------------------------------------------]6%Task#01:downloading30/274[======>-------------------------------------------------------]11%Task#02:downloading39/114[====================>-----------------------------------------]34%Task#03:downloading56/114[=============================>--------------------------------]49%Task#00:downloading18/268[===>----------------------------------------------------------]7%Task#01:downloading34/274[=======>------------------------------------------------------]12%Task#02:downloading42/114[======================>---------------------------------------]37%Task#03:downloading64/114[==================================>---------------------------]56%Task#00:downloading19/268[===>----------------------------------------------------------]7%Task#01:downloading40/274[========>-----------------------------------------------------]15%Task#02:downloading45/114[=======================>--------------------------------------]39%Task#03:downloading68/114[====================================>-------------------------]60%Task#00:downloading21/268[====>---------------------------------------------------------]8%Task#01:downloading44/274[=========>----------------------------------------------------]16%Task#02:downloading54/114[============================>---------------------------------]47%Task#03:downloading76/114[========================================>---------------------]67%Task#00:downloading25/268[=====>--------------------------------------------------------]9%Task#01:downloading52/274[===========>--------------------------------------------------]19%Task#02:downloading60/114[================================>-----------------------------]53%Task#03:downloading80/114[===========================================>------------------]70%Task#00:downloading27/268[=====>--------------------------------------------------------]10%Task#01:downloading54/274[===========>--------------------------------------------------]20%Task#02:downloading63/114[=================================>----------------------------]55%Task#03:downloading88/114[===============================================>--------------]77%Task#00:downloading29/268[======>-------------------------------------------------------]11%Task#01:downloading58/274[============>-------------------------------------------------]21%Task#02:downloading69/114[=====================================>------------------------]61%Task#03:downloading92/114[=================================================>------------]81%Task#00:downloading30/268[======>-------------------------------------------------------]11%Task#01:downloading60/274[=============>------------------------------------------------]22%Task#02:downloading75/114[========================================>---------------------]66%Task#03:downloading100/114[=====================================================>--------]88%Task#00:downloading32/268[======>-------------------------------------------------------]12%Task#01:downloading66/274[==============>-----------------------------------------------]24%Task#02:downloading78/114[=========================================>--------------------]68%Task#03:downloading108/114[==========================================================>---]95%Task#00:downloading34/268[=======>------------------------------------------------------]
13%Task#01:downloading70/274[===============>----------------------------------------------]26%Task#02:downloading84/114[=============================================>----------------]74%Task#03:downloading114/114[==============================================================]100%Task#00:downloading35/268[=======>------------------------------------------------------]13%Task#01:downloading74/274[================>---------------------------------------------]27%Task#02:downloading90/114[================================================>-------------]79%Task#03:installing00:00[--------------------------------------------------------------]1%Task#00:downloading37/268[========>-----------------------------------------------------]14%Task#01:downloading76/274[================>---------------------------------------------]28%Task#02:downloading96/114[===================================================>----------]84%Task#03:installing00:00[=>------------------------------------------------------------]2%Task#00:downloading40/268[========>-----------------------------------------------------]15%Task#01:downloading78/274[=================>--------------------------------------------]28%Task#02:downloading102/114[======================================================>-------]89%Task#03:installing00:00[=>------------------------------------------------------------]4%Task#00:downloading42/268[=========>----------------------------------------------------]16%Task#01:downloading82/274[==================>-------------------------------------------]30%Task#02:downloading108/114[==========================================================>---]95%Task#03:installing00:00[==>-----------------------------------------------------------]4%Task#00:downloading44/268[=========>----------------------------------------------------]16%Task#01:downloading86/274[==================>-------------------------------------------]31%Task#02:downloading114/114[==============================================================]100%Task#03:installing00:00[==>-----------------------------------------------------------]6%Task#00:downloading47/268[==========>---------------------------------------------------]18%Task#01:downloading88/274[===================>------------------------------------------]32%Task#02:installing00:00[=>------------------------------------------------------------]3%Task#03:installing00:09[====>---------------------------------------------------------]7%Task#00:downloading50/268[===========>--------------------------------------------------]19%Task#01:downloading92/274[====================>-----------------------------------------]34%Task#02:installing00:00[===>----------------------------------------------------------]6%Task#03:installing00:09[====>---------------------------------------------------------]9%Task#00:downloading52/268[===========>--------------------------------------------------]19%Task#01:downloading96/274[=====================>----------------------------------------]35%Task#02:installing00:00[=====>--------------------------------------------------------]9%Task#00:downloading54/268[===========>--------------------------------------------------]20%Task#01:downloading98/274[=====================>----------------------------------------]36%Task#02:installing00:03[========>-----------------------------------------------------]14%Task#00:downloading56/268[============>-------------------------------------------------]21%Task#01:downloading102/274[======================>--------------------------------
-------]37%Task#02:installing00:03[=========>----------------------------------------------------]17%Task#03:installing00:08[=======>------------------------------------------------------]12%Task#00:downloading57/268[============>-------------------------------------------------]21%Task#01:downloading106/274[=======================>--------------------------------------]39%Task#02:installing00:03[===========>--------------------------------------------------]19%Task#03:installing00:08[=======>------------------------------------------------------]14%Task#00:downloading59/268[=============>------------------------------------------------]22%Task#01:downloading114/274[=========================>------------------------------------]42%Task#02:installing00:03[=============>------------------------------------------------]23%Task#03:installing00:08[========>-----------------------------------------------------]14%Task#00:downloading61/268[=============>------------------------------------------------]23%Task#01:downloading120/274[==========================>-----------------------------------]44%Task#02:installing00:03[===============>----------------------------------------------]25%Task#03:installing00:08[=========>----------------------------------------------------]16%Task#00:downloading63/268[==============>-----------------------------------------------]24%Task#01:downloading126/274[============================>---------------------------------]46%Task#02:installing00:02[=================>--------------------------------------------]29%Task#03:installing00:08[=========>----------------------------------------------------]17%Task#00:downloading67/268[===============>----------------------------------------------]25%Task#01:downloading130/274[============================>---------------------------------]47%Task#02:installing00:02[==================>-------------------------------------------]31%Task#03:installing00:08[==========>---------------------------------------------------]17%Task#00:downloading68/268[===============>----------------------------------------------]25%Task#01:downloading132/274[=============================>--------------------------------]48%Task#02:installing00:02[=====================>----------------------------------------]36%Task#03:installing00:08[===========>--------------------------------------------------]19%Task#00:downloading69/268[===============>----------------------------------------------]26%Task#01:downloading136/274[==============================>-------------------------------]50%Task#02:installing00:02[======================>---------------------------------------]37%Task#03:installing00:08[============>-------------------------------------------------]20%Task#00:downloading71/268[===============>----------------------------------------------]26%Task#01:downloading140/274[===============================>------------------------------]51%Task#02:installing00:02[========================>-------------------------------------]41%Task#03:installing00:07[============>-------------------------------------------------]22%Task#00:downloading73/268[================>---------------------------------------------]27%Task#01:downloading144/274[================================>-----------------------------]53%Task#02:installing00:02[=========================>------------------------------------]42%Task#00:downloading75/268[================>---------------------------------------------]28%Task#01:downloading150/274[=================================>----------------
------------]55%Task#02:installing00:02[===========================>----------------------------------]45%Task#00:downloading78/268[=================>--------------------------------------------]29%Task#01:downloading154/274[==================================>---------------------------]56%Task#02:installing00:02[============================>---------------------------------]47%Task#03:installing00:07[===============>----------------------------------------------]26%Task#00:downloading80/268[==================>-------------------------------------------]30%Task#01:downloading158/274[===================================>--------------------------]58%Task#02:installing00:02[==============================>-------------------------------]50%Task#03:installing00:07[================>---------------------------------------------]27%Task#00:downloading82/268[==================>-------------------------------------------]31%Task#01:downloading162/274[====================================>-------------------------]59%Task#02:installing00:02[===============================>------------------------------]51%Task#03:installing00:06[=================>--------------------------------------------]29%Task#00:downloading87/268[===================>------------------------------------------]32%Task#01:downloading164/274[====================================>-------------------------]60%Task#02:installing00:01[==================================>---------------------------]56%Task#03:installing00:06[==================>-------------------------------------------]31%Task#00:downloading90/268[====================>-----------------------------------------]34%Task#01:downloading166/274[=====================================>------------------------]61%Task#02:installing00:01[====================================>-------------------------]60%Task#03:installing00:06[===================>------------------------------------------]32%Task#00:downloading92/268[====================>-----------------------------------------]34%Task#01:downloading172/274[======================================>-----------------------]63%Task#02:installing00:01[======================================>-----------------------]62%Task#03:installing00:05[====================>-----------------------------------------]34%Task#00:downloading93/268[=====================>----------------------------------------]35%Task#01:downloading176/274[=======================================>----------------------]64%Task#02:installing00:01[=======================================>----------------------]65%Task#03:installing00:05[=====================>----------------------------------------]35%Task#00:downloading96/268[=====================>----------------------------------------]36%Task#01:downloading178/274[=======================================>----------------------]65%Task#02:installing00:01[==========================================>-------------------]69%Task#03:installing00:05[=====================>----------------------------------------]36%Task#00:downloading99/268[======================>---------------------------------------]37%Task#01:downloading182/274[========================================>---------------------]66%Task#02:installing00:01[===========================================>------------------]71%Task#03:installing00:05[======================>---------------------------------------]37%Task#00:downloading102/268[=======================>--------------------------------------]38%Task#01:downloading186/274[=========================================>--
------------------]68%Task#02:installing00:00[==============================================>---------------]75%Task#03:installing00:06[======================>---------------------------------------]37%Task#00:downloading105/268[=======================>--------------------------------------]39%Task#01:downloading188/274[==========================================>-------------------]69%Task#02:installing00:00[===============================================>--------------]78%Task#03:installing00:06[=======================>--------------------------------------]39%Task#00:downloading107/268[========================>-------------------------------------]40%Task#01:downloading192/274[==========================================>-------------------]70%Task#02:installing00:00[=================================================>------------]80%Task#03:installing00:05[========================>-------------------------------------]40%Task#00:downloading109/268[========================>-------------------------------------]41%Task#01:downloading194/274[===========================================>------------------]71%Task#02:installing00:00[==================================================>-----------]82%Task#03:installing00:05[========================>-------------------------------------]41%Task#00:downloading112/268[=========================>------------------------------------]42%Task#01:downloading198/274[============================================>-----------------]72%Task#02:installing00:00[===================================================>----------]84%Task#03:installing00:05[=========================>------------------------------------]42%Task#00:downloading114/268[=========================>------------------------------------]43%Task#01:downloading202/274[=============================================>----------------]74%Task#02:installing00:00[=====================================================>--------]88%Task#03:installing00:05[==========================>-----------------------------------]44%Task#00:downloading116/268[==========================>-----------------------------------]43%Task#01:downloading206/274[==============================================>---------------]75%Task#02:installing00:00[========================================================>-----]92%Task#03:installing00:04[===========================>----------------------------------]46%Task#00:downloading119/268[===========================>----------------------------------]44%Task#01:downloading210/274[===============================================>--------------]77%Task#02:installing00:00[=========================================================>----]94%Task#03:installing00:04[=============================>--------------------------------]48%Task#00:downloading122/268[===========================>----------------------------------]46%Task#01:downloading212/274[===============================================>--------------]77%Task#02:installing00:00[===========================================================>--]97%Task#03:installing00:04[=============================>--------------------------------]49%Task#00:downloading124/268[============================>---------------------------------]46%Task#01:downloading214/274[===============================================>--------------]78%Task#02:installing00:00[==============================================================]99%Task#03:installing00:04[==============================>-------------------------------]50%Task#00:downloading126/268[============================>---
------------------------------]47%Task#01:downloading218/274[================================================>-------------]80%Task#02:installing00:00[==============================================================]100%Task#03:installing00:04[===============================>------------------------------]52%Task#00:downloading127/268[============================>---------------------------------]47%Task#01:downloading220/274[=================================================>------------]80%Task#03:installing00:04[================================>-----------------------------]53%Task#00:downloading130/268[=============================>--------------------------------]49%Task#01:downloading224/274[==================================================>-----------]82%Task#03:installing00:03[=================================>----------------------------]55%Task#00:downloading132/268[==============================>-------------------------------]49%Task#01:downloading230/274[===================================================>----------]84%Task#03:installing00:03[==================================>---------------------------]57%Task#00:downloading134/268[==============================>-------------------------------]50%Task#01:downloading234/274[====================================================>---------]85%Task#03:installing00:03[====================================>-------------------------]59%Task#00:downloading136/268[==============================>-------------------------------]51%Task#01:downloading238/274[=====================================================>--------]87%Task#03:installing00:03[====================================>-------------------------]60%Task#00:downloading139/268[===============================>------------------------------]52%Task#01:downloading242/274[======================================================>-------]88%Task#03:installing00:03[=====================================>------------------------]61%Task#00:downloading141/268[================================>-----------------------------]53%Task#01:downloading246/274[=======================================================>------]90%Task#03:installing00:03[======================================>-----------------------]63%Task#00:downloading143/268[================================>-----------------------------]53%Task#01:downloading254/274[========================================================>-----]93%Task#03:installing00:02[=======================================>----------------------]65%Task#00:downloading147/268[=================================>----------------------------]55%Task#01:downloading258/274[=========================================================>----]94%Task#03:installing00:02[========================================>---------------------]66%Task#00:downloading149/268[=================================>----------------------------]56%Task#01:downloading262/274[==========================================================>---]96%Task#03:installing00:02[=========================================>--------------------]68%Task#00:downloading150/268[==================================>---------------------------]56%Task#01:downloading268/274[============================================================>-]98%Task#03:installing00:02[==========================================>-------------------]69%Task#00:downloading152/268[==================================>---------------------------]57%Task#01:downloading272/274[==============================================================]99%Task#03:installing00:02[=
==========================================>------------------]70%Task#00:downloading156/268[===================================>--------------------------]58%Task#01:downloading274/274[==============================================================]100%Task#03:installing00:02[===========================================>------------------]71%Task#00:downloading160/268[====================================>-------------------------]60%Task#01:installing00:00[==>-----------------------------------------------------------]5%Task#03:installing00:02[============================================>-----------------]73%Task#00:downloading162/268[====================================>-------------------------]60%Task#01:installing00:00[====>---------------------------------------------------------]8%Task#03:installing00:02[=============================================>----------------]74%Task#00:downloading163/268[=====================================>------------------------]61%Task#01:installing00:00[=======>------------------------------------------------------]14%Task#03:installing00:02[=============================================>----------------]75%Task#00:downloading168/268[======================================>-----------------------]63%Task#01:installing00:00[==========>---------------------------------------------------]17%Task#03:installing00:02[==============================================>---------------]76%Task#00:downloading170/268[======================================>-----------------------]63%Task#01:installing00:02[============>-------------------------------------------------]20%Task#03:installing00:02[===============================================>--------------]77%Task#00:downloading173/268[=======================================>----------------------]65%Task#01:installing00:02[===============>----------------------------------------------]25%Task#03:installing00:01[================================================>-------------]78%Task#00:downloading176/268[========================================>---------------------]66%Task#01:installing00:02[==================>-------------------------------------------]31%Task#03:installing00:01[================================================>-------------]79%Task#00:downloading178/268[========================================>---------------------]66%Task#01:installing00:01[=====================>----------------------------------------]36%Task#03:installing00:01[=================================================>------------]81%Task#00:downloading180/268[=========================================>--------------------]67%Task#01:installing00:01[======================>---------------------------------------]37%Task#03:installing00:01[==================================================>-----------]83%Task#00:downloading182/268[=========================================>--------------------]68%Task#01:installing00:01[========================>-------------------------------------]41%Task#03:installing00:01[===================================================>----------]83%Task#00:downloading185/268[==========================================>-------------------]69%Task#01:installing00:01[=========================>------------------------------------]42%Task#03:installing00:01[===================================================>----------]84%Task#00:downloading188/268[==========================================>-------------------]70%Task#01:installing00:01[===========================>----------------------------------]46%Task#03:installing00:01[=====
===============================================>---------]85%Task#00:downloading190/268[===========================================>------------------]71%Task#01:installing00:01[=============================>--------------------------------]49%Task#03:installing00:01[=====================================================>--------]87%Task#00:downloading192/268[===========================================>------------------]72%Task#01:installing00:01[================================>-----------------------------]53%Task#03:installing00:01[======================================================>-------]88%Task#00:downloading194/268[============================================>-----------------]72%Task#01:installing00:01[=================================>----------------------------]54%Task#03:installing00:00[======================================================>-------]89%Task#00:downloading197/268[=============================================>----------------]74%Task#01:installing00:01[===================================>--------------------------]58%Task#03:installing00:00[=======================================================>------]91%Task#00:downloading198/268[=============================================>----------------]74%Task#01:installing00:01[=====================================>------------------------]61%Task#03:installing00:00[========================================================>-----]93%Task#00:downloading202/268[==============================================>---------------]75%Task#01:installing00:01[======================================>-----------------------]63%Task#03:installing00:00[=========================================================>----]93%Task#00:downloading204/268[==============================================>---------------]76%Task#01:installing00:01[========================================>---------------------]66%Task#03:installing00:00[==========================================================>---]94%Task#00:downloading206/268[===============================================>--------------]77%Task#01:installing00:01[==========================================>-------------------]69%Task#03:installing00:00[===========================================================>--]97%Task#00:downloading209/268[===============================================>--------------]78%Task#01:installing00:00[============================================>-----------------]73%Task#03:installing00:00[===========================================================>--]98%Task#00:downloading212/268[================================================>-------------]79%Task#01:installing00:00[==============================================>---------------]76%Task#03:installing00:00[============================================================>-]99%Task#00:downloading213/268[================================================>-------------]79%Task#01:installing00:00[================================================>-------------]80%Task#03:installing00:00[==============================================================]100%Task#00:downloading215/268[=================================================>------------]80%Task#01:installing00:00[=================================================>------------]81%Task#00:downloading217/268[=================================================>------------]81%Task#01:installing00:00[=====================================================>--------]86%Task#00:downloading219/268[==================================================>-----------]82%Task#01:installing00:00[=======
===============================================>-------]88%Task#00:downloading220/268[==================================================>-----------]82%Task#01:installing00:00[========================================================>-----]92%Task#00:downloading221/268[==================================================>-----------]82%Task#01:installing00:00[=========================================================>----]93%Task#00:downloading225/268[===================================================>----------]84%Task#01:installing00:00[===========================================================>--]97%Task#00:downloading227/268[====================================================>---------]85%Task#01:installing00:00[============================================================>-]98%Task#00:downloading229/268[====================================================>---------]85%Task#01:installing00:00[==============================================================]100%Task#00:downloading230/268[====================================================>---------]86%Task#00:downloading232/268[=====================================================>--------]87%Task#00:downloading233/268[=====================================================>--------]87%Task#00:downloading235/268[=====================================================>--------]88%Task#00:downloading236/268[======================================================>-------]88%Task#00:downloading237/268[======================================================>-------]88%Task#00:downloading240/268[=======================================================>------]90%Task#00:downloading241/268[=======================================================>------]90%Task#00:downloading244/268[=======================================================>------]91%Task#00:downloading246/268[========================================================>-----]92%Task#00:downloading249/268[=========================================================>----]93%Task#00:downloading251/268[=========================================================>----]94%Task#00:downloading253/268[==========================================================>---]94%Task#00:downloading256/268[==========================================================>---]96%Task#00:downloading257/268[==========================================================>---]96%Task#00:downloading259/268[===========================================================>--]97%Task#00:downloading261/268[===========================================================>--]97%Task#00:downloading262/268[============================================================>-]98%Task#00:downloading263/268[============================================================>-]98%Task#00:downloading266/268[==============================================================]99%Task#00:downloading268/268[==============================================================]100%Task#00:installing00:00[=>------------------------------------------------------------]3%Task#00:installing00:00[======>-------------------------------------------------------]12%Task#00:installing00:00[========>-----------------------------------------------------]14%Task#00:installing00:00[============>-------------------------------------------------]20%Task#00:installing00:00[=================>--------------------------------------------]29%Task#00:installing00:01[===================>------------------------------------------]32%Task#00:installing00:01[==========================>-----------------------------------]
[terminal-recording residue: the remaining frames of the preceding archive member show a "Task #00: installing" progress bar advancing from 43% to 100%]
dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/_svg/godEMrCZmJkHYH1X9dN4Nm0U7.svg
[animated-SVG terminal recording of the _examples/dynTotal demo, referenced from README.md as "Dynamic total": repeated `go run -race main.go` sessions in which byte-counter bars (e.g. 55.7KiB/56.7KiB ... 100%) keep growing toward a total that is raised while the bar runs; the tail of its frame-by-frame text continues below]
99%73.1KiB/74.1KiB[============================================================>-]99%74.4KiB/75.4KiB[============================================================>-]99%75.7KiB/76.7KiB[============================================================>-]99%78.2KiB/79.2KiB[============================================================>-]99%79.3KiB/80.3KiB[============================================================>-]99%80.1KiB/81.1KiB[============================================================>-]99%81.3KiB/82.3KiB[============================================================>-]99%82.3KiB/83.3KiB[============================================================>-]99%82.6KiB/83.6KiB[============================================================>-]99%84.0KiB/85.0KiB[============================================================>-]99%84.7KiB/85.7KiB[============================================================>-]99%85.9KiB/86.9KiB[============================================================>-]99%87.7KiB/88.7KiB[============================================================>-]99%88.8KiB/89.8KiB[============================================================>-]99%90.3KiB/91.3KiB[============================================================>-]99%91.6KiB/92.6KiB[============================================================>-]99%92.8KiB/93.8KiB[============================================================>-]99%93.5KiB/94.5KiB[============================================================>-]99%93.6KiB/94.6KiB[============================================================>-]99%95.1KiB/96.1KiB[============================================================>-]99%96.4KiB/97.4KiB[============================================================>-]99%97.6KiB/98.6KiB[============================================================>-]99%98.8KiB/99.8KiB[============================================================>-]99%99.7KiB/100.7KiB[============================================================>-]99%100.6KiB/101.6KiB[============================================================>-]99%100.7KiB/101.7KiB[============================================================>-]99%dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/cwriter/0000755000000000000000000000000015024302472023426 5ustar rootrootdependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/cwriter/util_bsd.go0000644000000000000000000000021315024302472025556 0ustar rootroot// +build darwin dragonfly freebsd netbsd openbsd package cwriter import "golang.org/x/sys/unix" const ioctlReadTermios = unix.TIOCGETA dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/cwriter/util_solaris.go0000644000000000000000000000015115024302472026463 0ustar rootroot// +build solaris package cwriter import "golang.org/x/sys/unix" const ioctlReadTermios = unix.TCGETA dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/cwriter/doc.go0000644000000000000000000000013215024302472024516 0ustar rootroot// Package cwriter is a console writer abstraction for the underlying OS. package cwriter dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/cwriter/writer_posix.go0000644000000000000000000000106415024302472026514 0ustar rootroot// +build !windows package cwriter import ( "golang.org/x/sys/unix" ) func (w *Writer) clearLines() error { return w.ansiCuuAndEd() } // GetSize returns the dimensions of the given terminal. 
func GetSize(fd int) (width, height int, err error) { ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) if err != nil { return -1, -1, err } return int(ws.Col), int(ws.Row), nil } // IsTerminal returns whether the given file descriptor is a terminal. func IsTerminal(fd int) bool { _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) return err == nil } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/cwriter/writer_windows.go0000644000000000000000000000406715024302472027052 0ustar rootroot// +build windows package cwriter import ( "unsafe" "golang.org/x/sys/windows" ) var kernel32 = windows.NewLazySystemDLL("kernel32.dll") var ( procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") ) func (w *Writer) clearLines() error { if !w.isTerminal { // hope it's cygwin or similar return w.ansiCuuAndEd() } var info windows.ConsoleScreenBufferInfo if err := windows.GetConsoleScreenBufferInfo(windows.Handle(w.fd), &info); err != nil { return err } info.CursorPosition.Y -= int16(w.lineCount) if info.CursorPosition.Y < 0 { info.CursorPosition.Y = 0 } _, _, _ = procSetConsoleCursorPosition.Call( uintptr(w.fd), uintptr(uint32(uint16(info.CursorPosition.Y))<<16|uint32(uint16(info.CursorPosition.X))), ) // clear the lines cursor := &windows.Coord{ X: info.Window.Left, Y: info.CursorPosition.Y, } count := uint32(info.Size.X) * uint32(w.lineCount) _, _, _ = procFillConsoleOutputCharacter.Call( uintptr(w.fd), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(cursor)), uintptr(unsafe.Pointer(new(uint32))), ) return nil } // GetSize returns the visible dimensions of the given terminal. // // These dimensions don't include any scrollback buffer height. func GetSize(fd int) (width, height int, err error) { var info windows.ConsoleScreenBufferInfo if err := windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info); err != nil { return 0, 0, err } // terminal.GetSize from crypto/ssh adds "+ 1" to both width and height: // https://go.googlesource.com/crypto/+/refs/heads/release-branch.go1.14/ssh/terminal/util_windows.go#75 // but looks like this is a root cause of issue #66, so removing both "+ 1" have fixed it. return int(info.Window.Right - info.Window.Left), int(info.Window.Bottom - info.Window.Top), nil } // IsTerminal returns whether the given file descriptor is a terminal. 
func IsTerminal(fd int) bool { var st uint32 err := windows.GetConsoleMode(windows.Handle(fd), &st) return err == nil } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/cwriter/util_linux.go0000644000000000000000000000015315024302472026150 0ustar rootroot// +build aix linux package cwriter import "golang.org/x/sys/unix" const ioctlReadTermios = unix.TCGETS dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/cwriter/cuuAndEd_construction_bench_test.go0000644000000000000000000000141315024302472032454 0ustar rootrootpackage cwriter import ( "bytes" "fmt" "io/ioutil" "strconv" "testing" ) func BenchmarkWithFprintf(b *testing.B) { cuuAndEd := "\x1b[%dA\x1b[J" for i := 0; i < b.N; i++ { fmt.Fprintf(ioutil.Discard, cuuAndEd, 4) } } func BenchmarkWithJoin(b *testing.B) { bCuuAndEd := [][]byte{[]byte("\x1b["), []byte("A\x1b[J")} for i := 0; i < b.N; i++ { ioutil.Discard.Write(bytes.Join(bCuuAndEd, []byte(strconv.Itoa(4)))) } } func BenchmarkWithAppend(b *testing.B) { escOpen := []byte("\x1b[") cuuAndEd := []byte("A\x1b[J") for i := 0; i < b.N; i++ { ioutil.Discard.Write(append(strconv.AppendInt(escOpen, 4, 10), cuuAndEd...)) } } func BenchmarkWithCopy(b *testing.B) { w := New(ioutil.Discard) w.lineCount = 4 for i := 0; i < b.N; i++ { w.ansiCuuAndEd() } } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/cwriter/writer.go0000644000000000000000000000351515024302472025275 0ustar rootrootpackage cwriter import ( "bytes" "errors" "io" "os" "strconv" ) // ErrNotTTY not a TeleTYpewriter error. var ErrNotTTY = errors.New("not a terminal") // http://ascii-table.com/ansi-escape-sequences.php const ( escOpen = "\x1b[" cuuAndEd = "A\x1b[J" ) // Writer is a buffered the writer that updates the terminal. The // contents of writer will be flushed when Flush is called. type Writer struct { out io.Writer buf bytes.Buffer lineCount int fd int isTerminal bool } // New returns a new Writer with defaults. func New(out io.Writer) *Writer { w := &Writer{out: out} if f, ok := out.(*os.File); ok { w.fd = int(f.Fd()) w.isTerminal = IsTerminal(w.fd) } return w } // Flush flushes the underlying buffer. func (w *Writer) Flush(lineCount int) (err error) { // some terminals interpret 'cursor up 0' as 'cursor up 1' if w.lineCount > 0 { err = w.clearLines() if err != nil { return } } w.lineCount = lineCount _, err = w.buf.WriteTo(w.out) return } // Write appends the contents of p to the underlying buffer. func (w *Writer) Write(p []byte) (n int, err error) { return w.buf.Write(p) } // WriteString writes string to the underlying buffer. func (w *Writer) WriteString(s string) (n int, err error) { return w.buf.WriteString(s) } // ReadFrom reads from the provided io.Reader and writes to the // underlying buffer. func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) { return w.buf.ReadFrom(r) } // GetWidth returns width of underlying terminal. 
func (w *Writer) GetWidth() (int, error) { if !w.isTerminal { return -1, ErrNotTTY } tw, _, err := GetSize(w.fd) return tw, err } func (w *Writer) ansiCuuAndEd() (err error) { buf := make([]byte, 8) buf = strconv.AppendInt(buf[:copy(buf, escOpen)], int64(w.lineCount), 10) _, err = w.out.Write(append(buf, cuuAndEd...)) return } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/draw_test.go0000644000000000000000000002052515024302472024276 0ustar rootrootpackage mpb import ( "bytes" "testing" "unicode/utf8" ) func TestDraw(t *testing.T) { // key is termWidth testSuite := map[int][]struct { name string style string total int64 current int64 refill int64 barWidth int trim bool reverse bool want string }{ 0: { { name: "t,c{60,20}", total: 60, current: 20, want: "… ", }, { name: "t,c{60,20}trim", total: 60, current: 20, trim: true, want: "", }, }, 1: { { name: "t,c{60,20}", total: 60, current: 20, want: "… ", }, { name: "t,c{60,20}trim", total: 60, current: 20, trim: true, want: "", }, }, 2: { { name: "t,c{60,20}", total: 60, current: 20, want: " ", }, { name: "t,c{60,20}trim", total: 60, current: 20, trim: true, want: "[]", }, }, 3: { { name: "t,c{60,20}", total: 60, current: 20, want: " ", }, { name: "t,c{60,20}trim", total: 60, current: 20, trim: true, want: "[-]", }, }, 4: { { name: "t,c{60,20}", total: 60, current: 20, want: " [] ", }, { name: "t,c{60,20}trim", total: 60, current: 20, trim: true, want: "[>-]", }, }, 5: { { name: "t,c{60,20}", total: 60, current: 20, want: " [-] ", }, { name: "t,c{60,20}trim", total: 60, current: 20, trim: true, want: "[>--]", }, }, 6: { { name: "t,c{60,20}", total: 60, current: 20, want: " [>-] ", }, { name: "t,c{60,20}trim", total: 60, current: 20, trim: true, want: "[>---]", }, }, 7: { { name: "t,c{60,20}", total: 60, current: 20, want: " [>--] ", }, { name: "t,c{60,20}trim", total: 60, current: 20, trim: true, want: "[=>---]", }, }, 8: { { name: "t,c{60,20}", total: 60, current: 20, want: " [>---] ", }, { name: "t,c{60,20}trim", total: 60, current: 20, trim: true, want: "[=>----]", }, }, 80: { { name: "t,c{60,20}", total: 60, current: 20, want: " [========================>---------------------------------------------------] ", }, { name: "t,c{60,20}trim", total: 60, current: 20, trim: true, want: "[=========================>----------------------------------------------------]", }, { name: "t,c,bw{60,20,60}", total: 60, current: 20, barWidth: 60, want: " [==================>---------------------------------------] ", }, { name: "t,c,bw{60,20,60}trim", total: 60, current: 20, barWidth: 60, trim: true, want: "[==================>---------------------------------------]", }, }, 100: { { name: "t,c{100,0}", total: 100, current: 0, want: " [------------------------------------------------------------------------------------------------] ", }, { name: "t,c{100,0}trim", total: 100, current: 0, trim: true, want: "[--------------------------------------------------------------------------------------------------]", }, { name: "t,c{100,1}", total: 100, current: 1, want: " [>-----------------------------------------------------------------------------------------------] ", }, { name: "t,c{100,1}trim", total: 100, current: 1, trim: true, want: "[>-------------------------------------------------------------------------------------------------]", }, { name: "t,c{100,99}", total: 100, current: 99, want: " [==============================================================================================>-] ", }, { name: "t,c{100,99}trim", total: 100, current: 99, 
trim: true, want: "[================================================================================================>-]", }, { name: "t,c{100,100}", total: 100, current: 100, want: " [================================================================================================] ", }, { name: "t,c{100,100}trim", total: 100, current: 100, trim: true, want: "[==================================================================================================]", }, { name: "t,c,r{100,100,100}trim", total: 100, current: 100, refill: 100, trim: true, want: "[++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++]", }, { name: "t,c{100,33}", total: 100, current: 33, want: " [===============================>----------------------------------------------------------------] ", }, { name: "t,c{100,33}trim", total: 100, current: 33, trim: true, want: "[===============================>------------------------------------------------------------------]", }, { name: "t,c{100,33}trim,rev", total: 100, current: 33, trim: true, reverse: true, want: "[------------------------------------------------------------------<===============================]", }, { name: "t,c,r{100,33,33}", total: 100, current: 33, refill: 33, want: " [+++++++++++++++++++++++++++++++>----------------------------------------------------------------] ", }, { name: "t,c,r{100,33,33}trim", total: 100, current: 33, refill: 33, trim: true, want: "[+++++++++++++++++++++++++++++++>------------------------------------------------------------------]", }, { name: "t,c,r{100,33,33}trim,rev", total: 100, current: 33, refill: 33, trim: true, reverse: true, want: "[------------------------------------------------------------------<+++++++++++++++++++++++++++++++]", }, { name: "t,c,r{100,40,33}", total: 100, current: 40, refill: 33, want: " [++++++++++++++++++++++++++++++++=====>----------------------------------------------------------] ", }, { name: "t,c,r{100,40,33}trim", total: 100, current: 40, refill: 33, trim: true, want: "[++++++++++++++++++++++++++++++++======>-----------------------------------------------------------]", }, { name: "t,c,r{100,40,33},rev", total: 100, current: 40, refill: 33, reverse: true, want: " [----------------------------------------------------------<=====++++++++++++++++++++++++++++++++] ", }, { name: "t,c,r{100,40,33}trim,rev", total: 100, current: 40, refill: 33, trim: true, reverse: true, want: "[-----------------------------------------------------------<======++++++++++++++++++++++++++++++++]", }, { name: "[=の-] t,c{100,1}", style: "[=の-]", total: 100, current: 1, want: " [の---------------------------------------------------------------------------------------------…] ", }, }, 197: { { name: "t,c,r{97486999,2805950,2805483}trim", total: 97486999, current: 2805950, refill: 2805483, barWidth: 60, trim: true, want: "[+>--------------------------------------------------------]", }, }, } var tmpBuf bytes.Buffer for tw, cases := range testSuite { for _, tc := range cases { s := newTestState(tc.style, tc.reverse) s.reqWidth = tc.barWidth s.total = tc.total s.current = tc.current s.trimSpace = tc.trim s.refill = tc.refill tmpBuf.Reset() tmpBuf.ReadFrom(s.draw(newStatistics(tw, s))) by := tmpBuf.Bytes() got := string(by[:len(by)-1]) if !utf8.ValidString(got) { t.Fail() } if got != tc.want { t.Errorf("termWidth:%d %q want: %q %d, got: %q %d\n", tw, tc.name, tc.want, utf8.RuneCountInString(tc.want), got, utf8.RuneCountInString(got)) } } } } func newTestState(style string, rev 
bool) *bState { s := &bState{ filler: NewBarFillerPick(style, rev), bufP: new(bytes.Buffer), bufB: new(bytes.Buffer), bufA: new(bytes.Buffer), } return s } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/export_test.go0000644000000000000000000000017415024302472024660 0ustar rootrootpackage mpb // make syncWidth func public in test var SyncWidth = syncWidth var MaxWidthDistributor = &maxWidthDistributor dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/decorators_test.go0000644000000000000000000001011515024302472025500 0ustar rootrootpackage mpb_test import ( "sync" "testing" "github.com/vbauerster/mpb/v6" "github.com/vbauerster/mpb/v6/decor" ) func TestNameDecorator(t *testing.T) { tests := []struct { decorator decor.Decorator want string }{ { decorator: decor.Name("Test"), want: "Test", }, { decorator: decor.Name("Test", decor.WC{W: len("Test")}), want: "Test", }, { decorator: decor.Name("Test", decor.WC{W: 10}), want: " Test", }, { decorator: decor.Name("Test", decor.WC{W: 10, C: decor.DidentRight}), want: "Test ", }, } for _, test := range tests { got := test.decorator.Decor(decor.Statistics{}) if got != test.want { t.Errorf("Want: %q, Got: %q\n", test.want, got) } } } type step struct { stat decor.Statistics decorator decor.Decorator want string } func TestPercentageDwidthSync(t *testing.T) { testCases := [][]step{ { { decor.Statistics{Total: 100, Current: 8}, decor.Percentage(decor.WCSyncWidth), "8 %", }, { decor.Statistics{Total: 100, Current: 9}, decor.Percentage(decor.WCSyncWidth), "9 %", }, }, { { decor.Statistics{Total: 100, Current: 9}, decor.Percentage(decor.WCSyncWidth), " 9 %", }, { decor.Statistics{Total: 100, Current: 10}, decor.Percentage(decor.WCSyncWidth), "10 %", }, }, { { decor.Statistics{Total: 100, Current: 9}, decor.Percentage(decor.WCSyncWidth), " 9 %", }, { decor.Statistics{Total: 100, Current: 100}, decor.Percentage(decor.WCSyncWidth), "100 %", }, }, } testDecoratorConcurrently(t, testCases) } func TestPercentageDwidthSyncDidentRight(t *testing.T) { testCases := [][]step{ { { decor.Statistics{Total: 100, Current: 8}, decor.Percentage(decor.WCSyncWidthR), "8 %", }, { decor.Statistics{Total: 100, Current: 9}, decor.Percentage(decor.WCSyncWidthR), "9 %", }, }, { { decor.Statistics{Total: 100, Current: 9}, decor.Percentage(decor.WCSyncWidthR), "9 % ", }, { decor.Statistics{Total: 100, Current: 10}, decor.Percentage(decor.WCSyncWidthR), "10 %", }, }, { { decor.Statistics{Total: 100, Current: 9}, decor.Percentage(decor.WCSyncWidthR), "9 % ", }, { decor.Statistics{Total: 100, Current: 100}, decor.Percentage(decor.WCSyncWidthR), "100 %", }, }, } testDecoratorConcurrently(t, testCases) } func TestPercentageDSyncSpace(t *testing.T) { testCases := [][]step{ { { decor.Statistics{Total: 100, Current: 8}, decor.Percentage(decor.WCSyncSpace), " 8 %", }, { decor.Statistics{Total: 100, Current: 9}, decor.Percentage(decor.WCSyncSpace), " 9 %", }, }, { { decor.Statistics{Total: 100, Current: 9}, decor.Percentage(decor.WCSyncSpace), " 9 %", }, { decor.Statistics{Total: 100, Current: 10}, decor.Percentage(decor.WCSyncSpace), " 10 %", }, }, { { decor.Statistics{Total: 100, Current: 9}, decor.Percentage(decor.WCSyncSpace), " 9 %", }, { decor.Statistics{Total: 100, Current: 100}, decor.Percentage(decor.WCSyncSpace), " 100 %", }, }, } testDecoratorConcurrently(t, testCases) } func testDecoratorConcurrently(t *testing.T, testCases [][]step) { if len(testCases) == 0 { t.Fail() } for _, columnCase := range testCases { mpb.SyncWidth(toSyncMatrix(columnCase)) numBars := 
len(columnCase) gott := make([]chan string, numBars) wg := new(sync.WaitGroup) wg.Add(numBars) for i, step := range columnCase { step := step ch := make(chan string, 1) go func() { defer wg.Done() ch <- step.decorator.Decor(step.stat) }() gott[i] = ch } wg.Wait() for i, ch := range gott { got := <-ch want := columnCase[i].want if got != want { t.Errorf("Want: %q, Got: %q\n", want, got) } } } } func toSyncMatrix(ss []step) map[int][]chan int { var column []chan int for _, s := range ss { if ch, ok := s.decorator.Sync(); ok { column = append(column, ch) } } return map[int][]chan int{0: column} } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/bar.go0000644000000000000000000002765015024302472023054 0ustar rootrootpackage mpb import ( "bytes" "context" "fmt" "io" "log" "runtime/debug" "strings" "time" "github.com/acarl005/stripansi" "github.com/mattn/go-runewidth" "github.com/vbauerster/mpb/v6/decor" ) // Bar represents a progress bar. type Bar struct { priority int // used by heap index int // used by heap extendedLines int toShutdown bool toDrop bool noPop bool hasEwmaDecorators bool operateState chan func(*bState) frameCh chan io.Reader syncTableCh chan [][]chan int completed chan bool // cancel is called either by user or on complete event cancel func() // done is closed after cacheState is assigned done chan struct{} // cacheState is populated, right after close(shutdown) cacheState *bState container *Progress dlogger *log.Logger recoveredPanic interface{} } type extenderFunc func(in io.Reader, reqWidth int, st decor.Statistics) (out io.Reader, lines int) // bState is actual bar state. It gets passed to *Bar.serve(...) monitor // goroutine. type bState struct { id int priority int reqWidth int total int64 current int64 refill int64 lastN int64 iterated bool trimSpace bool completed bool completeFlushed bool triggerComplete bool dropOnComplete bool noPop bool aDecorators []decor.Decorator pDecorators []decor.Decorator averageDecorators []decor.AverageDecorator ewmaDecorators []decor.EwmaDecorator shutdownListeners []decor.ShutdownListener bufP, bufB, bufA *bytes.Buffer filler BarFiller middleware func(BarFiller) BarFiller extender extenderFunc // runningBar is a key for *pState.parkedBars runningBar *Bar debugOut io.Writer } func newBar(container *Progress, bs *bState) *Bar { logPrefix := fmt.Sprintf("%sbar#%02d ", container.dlogger.Prefix(), bs.id) ctx, cancel := context.WithCancel(container.ctx) bar := &Bar{ container: container, priority: bs.priority, toDrop: bs.dropOnComplete, noPop: bs.noPop, operateState: make(chan func(*bState)), frameCh: make(chan io.Reader, 1), syncTableCh: make(chan [][]chan int, 1), completed: make(chan bool, 1), done: make(chan struct{}), cancel: cancel, dlogger: log.New(bs.debugOut, logPrefix, log.Lshortfile), } go bar.serve(ctx, bs) return bar } // ProxyReader wraps r with metrics required for progress tracking. // Panics if r is nil. func (b *Bar) ProxyReader(r io.Reader) io.ReadCloser { if r == nil { panic("expected non nil io.Reader") } return newProxyReader(r, b) } // ID returs id of the bar. func (b *Bar) ID() int { result := make(chan int) select { case b.operateState <- func(s *bState) { result <- s.id }: return <-result case <-b.done: return b.cacheState.id } } // Current returns bar's current number, in other words sum of all increments. 
func (b *Bar) Current() int64 { result := make(chan int64) select { case b.operateState <- func(s *bState) { result <- s.current }: return <-result case <-b.done: return b.cacheState.current } } // SetRefill sets refill flag with specified amount. // The underlying BarFiller will change its visual representation, to // indicate refill event. Refill event may be referred to some retry // operation for example. func (b *Bar) SetRefill(amount int64) { select { case b.operateState <- func(s *bState) { s.refill = amount }: case <-b.done: } } // TraverseDecorators traverses all available decorators and calls cb func on each. func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) { select { case b.operateState <- func(s *bState) { for _, decorators := range [...][]decor.Decorator{ s.pDecorators, s.aDecorators, } { for _, d := range decorators { cb(extractBaseDecorator(d)) } } }: case <-b.done: } } // SetTotal sets total dynamically. // If total is less than or equal to zero it takes progress' current value. func (b *Bar) SetTotal(total int64, triggerComplete bool) { select { case b.operateState <- func(s *bState) { s.triggerComplete = triggerComplete if total <= 0 { s.total = s.current } else { s.total = total } if s.triggerComplete && !s.completed { s.current = s.total s.completed = true go b.refreshTillShutdown() } }: case <-b.done: } } // SetCurrent sets progress' current to an arbitrary value. // Setting a negative value will cause a panic. func (b *Bar) SetCurrent(current int64) { select { case b.operateState <- func(s *bState) { s.iterated = true s.lastN = current - s.current s.current = current if s.triggerComplete && s.current >= s.total { s.current = s.total s.completed = true go b.refreshTillShutdown() } }: case <-b.done: } } // Increment is a shorthand for b.IncrInt64(1). func (b *Bar) Increment() { b.IncrInt64(1) } // IncrBy is a shorthand for b.IncrInt64(int64(n)). func (b *Bar) IncrBy(n int) { b.IncrInt64(int64(n)) } // IncrInt64 increments progress by amount of n. func (b *Bar) IncrInt64(n int64) { select { case b.operateState <- func(s *bState) { s.iterated = true s.lastN = n s.current += n if s.triggerComplete && s.current >= s.total { s.current = s.total s.completed = true go b.refreshTillShutdown() } }: case <-b.done: } } // DecoratorEwmaUpdate updates all EWMA based decorators. Should be // called on each iteration, because EWMA's unit of measure is an // iteration's duration. Panics if called before *Bar.Incr... family // methods. func (b *Bar) DecoratorEwmaUpdate(dur time.Duration) { select { case b.operateState <- func(s *bState) { ewmaIterationUpdate(false, s, dur) }: case <-b.done: ewmaIterationUpdate(true, b.cacheState, dur) } } // DecoratorAverageAdjust adjusts all average based decorators. Call // if you need to adjust start time of all average based decorators // or after progress resume. func (b *Bar) DecoratorAverageAdjust(start time.Time) { select { case b.operateState <- func(s *bState) { for _, d := range s.averageDecorators { d.AverageAdjust(start) } }: case <-b.done: } } // SetPriority changes bar's order among multiple bars. Zero is highest // priority, i.e. bar will be on top. If you don't need to set priority // dynamically, better use BarPriority option. func (b *Bar) SetPriority(priority int) { select { case <-b.done: default: b.container.setBarPriority(b, priority) } } // Abort interrupts bar's running goroutine. Call this, if you'd like // to stop/remove bar before completion event. It has no effect after // completion event. 
If drop is true bar will be removed as well. func (b *Bar) Abort(drop bool) { select { case <-b.done: default: if drop { b.container.dropBar(b) } b.cancel() } } // Completed reports whether the bar is in completed state. func (b *Bar) Completed() bool { select { case b.operateState <- func(s *bState) { b.completed <- s.completed }: return <-b.completed case <-b.done: return true } } func (b *Bar) serve(ctx context.Context, s *bState) { defer b.container.bwg.Done() for { select { case op := <-b.operateState: op(s) case <-ctx.Done(): b.cacheState = s close(b.done) // Notifying decorators about shutdown event for _, sl := range s.shutdownListeners { sl.Shutdown() } return } } } func (b *Bar) render(tw int) { select { case b.operateState <- func(s *bState) { stat := newStatistics(tw, s) defer func() { // recovering if user defined decorator panics for example if p := recover(); p != nil { if b.recoveredPanic == nil { s.extender = makePanicExtender(p) b.toShutdown = !b.toShutdown b.recoveredPanic = p } frame, lines := s.extender(nil, s.reqWidth, stat) b.extendedLines = lines b.frameCh <- frame b.dlogger.Println(p) } s.completeFlushed = s.completed }() frame, lines := s.extender(s.draw(stat), s.reqWidth, stat) b.extendedLines = lines b.toShutdown = s.completed && !s.completeFlushed b.frameCh <- frame }: case <-b.done: s := b.cacheState stat := newStatistics(tw, s) var r io.Reader if b.recoveredPanic == nil { r = s.draw(stat) } frame, lines := s.extender(r, s.reqWidth, stat) b.extendedLines = lines b.frameCh <- frame } } func (b *Bar) subscribeDecorators() { var averageDecorators []decor.AverageDecorator var ewmaDecorators []decor.EwmaDecorator var shutdownListeners []decor.ShutdownListener b.TraverseDecorators(func(d decor.Decorator) { if d, ok := d.(decor.AverageDecorator); ok { averageDecorators = append(averageDecorators, d) } if d, ok := d.(decor.EwmaDecorator); ok { ewmaDecorators = append(ewmaDecorators, d) } if d, ok := d.(decor.ShutdownListener); ok { shutdownListeners = append(shutdownListeners, d) } }) select { case b.operateState <- func(s *bState) { s.averageDecorators = averageDecorators s.ewmaDecorators = ewmaDecorators s.shutdownListeners = shutdownListeners }: b.hasEwmaDecorators = len(ewmaDecorators) != 0 case <-b.done: } } func (b *Bar) refreshTillShutdown() { for { select { case b.container.refreshCh <- time.Now(): case <-b.done: return } } } func (b *Bar) wSyncTable() [][]chan int { select { case b.operateState <- func(s *bState) { b.syncTableCh <- s.wSyncTable() }: return <-b.syncTableCh case <-b.done: return b.cacheState.wSyncTable() } } func (s *bState) draw(stat decor.Statistics) io.Reader { if !s.trimSpace { stat.AvailableWidth -= 2 s.bufB.WriteByte(' ') defer s.bufB.WriteByte(' ') } nlr := strings.NewReader("\n") tw := stat.AvailableWidth for _, d := range s.pDecorators { str := d.Decor(stat) stat.AvailableWidth -= runewidth.StringWidth(stripansi.Strip(str)) s.bufP.WriteString(str) } if stat.AvailableWidth <= 0 { trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(s.bufP.String()), tw, "…")) s.bufP.Reset() return io.MultiReader(trunc, s.bufB, nlr) } tw = stat.AvailableWidth for _, d := range s.aDecorators { str := d.Decor(stat) stat.AvailableWidth -= runewidth.StringWidth(stripansi.Strip(str)) s.bufA.WriteString(str) } if stat.AvailableWidth <= 0 { trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(s.bufA.String()), tw, "…")) s.bufA.Reset() return io.MultiReader(s.bufP, s.bufB, trunc, nlr) } s.filler.Fill(s.bufB, s.reqWidth, stat) return 
io.MultiReader(s.bufP, s.bufB, s.bufA, nlr) } func (s *bState) wSyncTable() [][]chan int { columns := make([]chan int, 0, len(s.pDecorators)+len(s.aDecorators)) var pCount int for _, d := range s.pDecorators { if ch, ok := d.Sync(); ok { columns = append(columns, ch) pCount++ } } var aCount int for _, d := range s.aDecorators { if ch, ok := d.Sync(); ok { columns = append(columns, ch) aCount++ } } table := make([][]chan int, 2) table[0] = columns[0:pCount] table[1] = columns[pCount : pCount+aCount : pCount+aCount] return table } func newStatistics(tw int, s *bState) decor.Statistics { return decor.Statistics{ ID: s.id, AvailableWidth: tw, Total: s.total, Current: s.current, Refill: s.refill, Completed: s.completeFlushed, } } func extractBaseDecorator(d decor.Decorator) decor.Decorator { if d, ok := d.(decor.Wrapper); ok { return extractBaseDecorator(d.Base()) } return d } func ewmaIterationUpdate(done bool, s *bState, dur time.Duration) { if !done && !s.iterated { panic("increment required before ewma iteration update") } else { s.iterated = false } for _, d := range s.ewmaDecorators { d.EwmaUpdate(s.lastN, dur) } } func makePanicExtender(p interface{}) extenderFunc { pstr := fmt.Sprint(p) stack := debug.Stack() stackLines := bytes.Count(stack, []byte("\n")) return func(_ io.Reader, _ int, st decor.Statistics) (io.Reader, int) { mr := io.MultiReader( strings.NewReader(runewidth.Truncate(pstr, st.AvailableWidth, "…")), strings.NewReader(fmt.Sprintf("\n%#v\n", st)), bytes.NewReader(stack), ) return mr, stackLines + 1 } } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/_examples/0000755000000000000000000000000015024302472023724 5ustar rootrootdependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/_examples/.gitignore0000644000000000000000000000000715024302472025711 0ustar rootrootgo.sum dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/priority_queue.go0000644000000000000000000000113315024302472025361 0ustar rootrootpackage mpb // A priorityQueue implements heap.Interface type priorityQueue []*Bar func (pq priorityQueue) Len() int { return len(pq) } func (pq priorityQueue) Less(i, j int) bool { return pq[i].priority < pq[j].priority } func (pq priorityQueue) Swap(i, j int) { pq[i], pq[j] = pq[j], pq[i] pq[i].index = i pq[j].index = j } func (pq *priorityQueue) Push(x interface{}) { s := *pq bar := x.(*Bar) bar.index = len(s) s = append(s, bar) *pq = s } func (pq *priorityQueue) Pop() interface{} { s := *pq *pq = s[0 : len(s)-1] bar := s[len(s)-1] bar.index = -1 // for safety return bar } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/progress_test.go0000644000000000000000000000740115024302472025203 0ustar rootrootpackage mpb_test import ( "bytes" "context" "io/ioutil" "math/rand" "sync" "testing" "time" "github.com/vbauerster/mpb/v6" "github.com/vbauerster/mpb/v6/decor" ) func init() { rand.Seed(time.Now().UnixNano()) } func TestBarCount(t *testing.T) { p := mpb.New(mpb.WithOutput(ioutil.Discard)) var wg sync.WaitGroup wg.Add(1) b := p.AddBar(100) go func() { rng := rand.New(rand.NewSource(time.Now().UnixNano())) for i := 0; i < 100; i++ { if i == 33 { wg.Done() } b.Increment() time.Sleep((time.Duration(rng.Intn(10)+1) * (10 * time.Millisecond)) / 2) } }() wg.Wait() count := p.BarCount() if count != 1 { t.Errorf("BarCount want: %q, got: %q\n", 1, count) } b.Abort(true) p.Wait() } func TestBarAbort(t *testing.T) { p := mpb.New(mpb.WithOutput(ioutil.Discard)) var wg sync.WaitGroup wg.Add(1) bars := make([]*mpb.Bar, 3) for i := 0; i < 3; i++ { b := p.AddBar(100) rng 
:= rand.New(rand.NewSource(time.Now().UnixNano())) go func(n int) { for i := 0; !b.Completed(); i++ { if n == 0 && i >= 33 { b.Abort(true) wg.Done() } b.Increment() time.Sleep((time.Duration(rng.Intn(10)+1) * (10 * time.Millisecond)) / 2) } }(i) bars[i] = b } wg.Wait() count := p.BarCount() if count != 2 { t.Errorf("BarCount want: %q, got: %q\n", 2, count) } bars[1].Abort(true) bars[2].Abort(true) p.Wait() } func TestWithContext(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) shutdown := make(chan struct{}) p := mpb.NewWithContext(ctx, mpb.WithOutput(ioutil.Discard), mpb.WithRefreshRate(50*time.Millisecond), mpb.WithShutdownNotifier(shutdown), ) total := 10000 numBars := 3 bars := make([]*mpb.Bar, 0, numBars) for i := 0; i < numBars; i++ { bar := p.AddBar(int64(total)) bars = append(bars, bar) go func() { for !bar.Completed() { bar.Increment() time.Sleep(randomDuration(100 * time.Millisecond)) } }() } time.Sleep(50 * time.Millisecond) cancel() p.Wait() select { case <-shutdown: case <-time.After(100 * time.Millisecond): t.Error("Progress didn't stop") } } // MaxWidthDistributor shouldn't stuck in the middle while removing or aborting a bar func TestMaxWidthDistributor(t *testing.T) { makeWrapper := func(f func([]chan int), start, end chan struct{}) func([]chan int) { return func(column []chan int) { start <- struct{}{} f(column) <-end } } ready := make(chan struct{}) start := make(chan struct{}) end := make(chan struct{}) *mpb.MaxWidthDistributor = makeWrapper(*mpb.MaxWidthDistributor, start, end) total := 80 numBars := 3 p := mpb.New(mpb.WithOutput(ioutil.Discard)) for i := 0; i < numBars; i++ { bar := p.AddBar(int64(total), mpb.BarOptional(mpb.BarRemoveOnComplete(), i == 0), mpb.PrependDecorators( decor.EwmaETA(decor.ET_STYLE_GO, 60, decor.WCSyncSpace), ), ) go func() { <-ready rng := rand.New(rand.NewSource(time.Now().UnixNano())) for i := 0; i < total; i++ { start := time.Now() if bar.ID() >= numBars-1 && i >= 42 { bar.Abort(true) } time.Sleep((time.Duration(rng.Intn(10)+1) * (10 * time.Millisecond)) / 2) bar.Increment() bar.DecoratorEwmaUpdate(time.Since(start)) } }() } go func() { <-ready p.Wait() close(start) }() res := t.Run("maxWidthDistributor", func(t *testing.T) { close(ready) for v := range start { timer := time.NewTimer(100 * time.Millisecond) select { case end <- v: timer.Stop() case <-timer.C: t.FailNow() } } }) if !res { t.Error("maxWidthDistributor stuck in the middle") } } func getLastLine(bb []byte) []byte { split := bytes.Split(bb, []byte("\n")) return split[len(split)-2] } func randomDuration(max time.Duration) time.Duration { return time.Duration(rand.Intn(10)+1) * max / 10 } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/go.mod0000644000000000000000000000043315024302472023055 0ustar rootrootmodule github.com/vbauerster/mpb/v6 require ( github.com/VividCortex/ewma v1.2.0 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d github.com/mattn/go-runewidth v0.0.12 github.com/rivo/uniseg v0.2.0 golang.org/x/sys v0.0.0-20210514084401-e8d321eab015 ) go 1.14 dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/doc.go0000644000000000000000000000013615024302472023043 0ustar rootroot// Package mpb is a library for rendering progress bars in terminal applications. 
package mpb dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/progress.go0000644000000000000000000002203115024302472024140 0ustar rootrootpackage mpb import ( "bytes" "container/heap" "context" "fmt" "io" "io/ioutil" "log" "math" "os" "sync" "time" "github.com/vbauerster/mpb/v6/cwriter" "github.com/vbauerster/mpb/v6/decor" ) const ( // default RefreshRate prr = 120 * time.Millisecond ) // Progress represents a container that renders one or more progress // bars. type Progress struct { ctx context.Context uwg *sync.WaitGroup cwg *sync.WaitGroup bwg *sync.WaitGroup operateState chan func(*pState) done chan struct{} refreshCh chan time.Time once sync.Once dlogger *log.Logger } // pState holds bars in its priorityQueue. It gets passed to // *Progress.serve(...) monitor goroutine. type pState struct { bHeap priorityQueue heapUpdated bool pMatrix map[int][]chan int aMatrix map[int][]chan int barShutdownQueue []*Bar // following are provided/overrided by user idCount int reqWidth int popCompleted bool outputDiscarded bool rr time.Duration uwg *sync.WaitGroup externalRefresh <-chan interface{} renderDelay <-chan struct{} shutdownNotifier chan struct{} parkedBars map[*Bar]*Bar output io.Writer debugOut io.Writer } // New creates new Progress container instance. It's not possible to // reuse instance after *Progress.Wait() method has been called. func New(options ...ContainerOption) *Progress { return NewWithContext(context.Background(), options...) } // NewWithContext creates new Progress container instance with provided // context. It's not possible to reuse instance after *Progress.Wait() // method has been called. func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress { s := &pState{ bHeap: priorityQueue{}, rr: prr, parkedBars: make(map[*Bar]*Bar), output: os.Stdout, debugOut: ioutil.Discard, } for _, opt := range options { if opt != nil { opt(s) } } p := &Progress{ ctx: ctx, uwg: s.uwg, cwg: new(sync.WaitGroup), bwg: new(sync.WaitGroup), operateState: make(chan func(*pState)), done: make(chan struct{}), dlogger: log.New(s.debugOut, "[mpb] ", log.Lshortfile), } p.cwg.Add(1) go p.serve(s, cwriter.New(s.output)) return p } // AddBar creates a bar with default bar filler. Different filler can // be choosen and applied via `*Progress.Add(...) *Bar` method. func (p *Progress) AddBar(total int64, options ...BarOption) *Bar { return p.Add(total, NewBarFiller(BarDefaultStyle), options...) } // AddSpinner creates a bar with default spinner filler. Different // filler can be choosen and applied via `*Progress.Add(...) *Bar` // method. func (p *Progress) AddSpinner(total int64, alignment SpinnerAlignment, options ...BarOption) *Bar { return p.Add(total, NewSpinnerFiller(SpinnerDefaultStyle, alignment), options...) } // Add creates a bar which renders itself by provided filler. // If `total <= 0` trigger complete event is disabled until reset with *bar.SetTotal(int64, bool). // Panics if *Progress instance is done, i.e. called after *Progress.Wait(). func (p *Progress) Add(total int64, filler BarFiller, options ...BarOption) *Bar { if filler == nil { filler = BarFillerFunc(func(io.Writer, int, decor.Statistics) {}) } p.bwg.Add(1) result := make(chan *Bar) select { case p.operateState <- func(ps *pState) { bs := ps.makeBarState(total, filler, options...) 
bar := newBar(p, bs) if bs.runningBar != nil { bs.runningBar.noPop = true ps.parkedBars[bs.runningBar] = bar } else { heap.Push(&ps.bHeap, bar) ps.heapUpdated = true } ps.idCount++ result <- bar }: bar := <-result bar.subscribeDecorators() return bar case <-p.done: p.bwg.Done() panic(fmt.Sprintf("%T instance can't be reused after it's done!", p)) } } func (p *Progress) dropBar(b *Bar) { select { case p.operateState <- func(s *pState) { if b.index < 0 { return } heap.Remove(&s.bHeap, b.index) s.heapUpdated = true }: case <-p.done: } } func (p *Progress) setBarPriority(b *Bar, priority int) { select { case p.operateState <- func(s *pState) { if b.index < 0 { return } b.priority = priority heap.Fix(&s.bHeap, b.index) }: case <-p.done: } } // UpdateBarPriority same as *Bar.SetPriority(int). func (p *Progress) UpdateBarPriority(b *Bar, priority int) { p.setBarPriority(b, priority) } // BarCount returns bars count. func (p *Progress) BarCount() int { result := make(chan int, 1) select { case p.operateState <- func(s *pState) { result <- s.bHeap.Len() }: return <-result case <-p.done: return 0 } } // Wait waits for all bars to complete and finally shutdowns container. // After this method has been called, there is no way to reuse *Progress // instance. func (p *Progress) Wait() { if p.uwg != nil { // wait for user wg p.uwg.Wait() } // wait for bars to quit, if any p.bwg.Wait() p.once.Do(p.shutdown) // wait for container to quit p.cwg.Wait() } func (p *Progress) shutdown() { close(p.done) } func (p *Progress) serve(s *pState, cw *cwriter.Writer) { defer p.cwg.Done() p.refreshCh = s.newTicker(p.done) for { select { case op := <-p.operateState: op(s) case <-p.refreshCh: if err := s.render(cw); err != nil { p.dlogger.Println(err) } case <-s.shutdownNotifier: if s.heapUpdated { if err := s.render(cw); err != nil { p.dlogger.Println(err) } } return } } } func (s *pState) newTicker(done <-chan struct{}) chan time.Time { ch := make(chan time.Time) if s.shutdownNotifier == nil { s.shutdownNotifier = make(chan struct{}) } go func() { if s.renderDelay != nil { <-s.renderDelay } var internalRefresh <-chan time.Time if !s.outputDiscarded { if s.externalRefresh == nil { ticker := time.NewTicker(s.rr) defer ticker.Stop() internalRefresh = ticker.C } } else { s.externalRefresh = nil } for { select { case t := <-internalRefresh: ch <- t case x := <-s.externalRefresh: if t, ok := x.(time.Time); ok { ch <- t } else { ch <- time.Now() } case <-done: close(s.shutdownNotifier) return } } }() return ch } func (s *pState) render(cw *cwriter.Writer) error { if s.heapUpdated { s.updateSyncMatrix() s.heapUpdated = false } syncWidth(s.pMatrix) syncWidth(s.aMatrix) tw, err := cw.GetWidth() if err != nil { tw = s.reqWidth } for i := 0; i < s.bHeap.Len(); i++ { bar := s.bHeap[i] go bar.render(tw) } return s.flush(cw) } func (s *pState) flush(cw *cwriter.Writer) error { var lineCount int bm := make(map[*Bar]struct{}, s.bHeap.Len()) for s.bHeap.Len() > 0 { b := heap.Pop(&s.bHeap).(*Bar) cw.ReadFrom(<-b.frameCh) if b.toShutdown { if b.recoveredPanic != nil { s.barShutdownQueue = append(s.barShutdownQueue, b) b.toShutdown = false } else { // shutdown at next flush // this ensures no bar ends up with less than 100% rendered defer func() { s.barShutdownQueue = append(s.barShutdownQueue, b) }() } } lineCount += b.extendedLines + 1 bm[b] = struct{}{} } for _, b := range s.barShutdownQueue { if parkedBar := s.parkedBars[b]; parkedBar != nil { parkedBar.priority = b.priority heap.Push(&s.bHeap, parkedBar) delete(s.parkedBars, b) 
b.toDrop = true } if s.popCompleted && !b.noPop { lineCount -= b.extendedLines + 1 b.toDrop = true } if b.toDrop { delete(bm, b) s.heapUpdated = true } b.cancel() } s.barShutdownQueue = s.barShutdownQueue[0:0] for b := range bm { heap.Push(&s.bHeap, b) } return cw.Flush(lineCount) } func (s *pState) updateSyncMatrix() { s.pMatrix = make(map[int][]chan int) s.aMatrix = make(map[int][]chan int) for i := 0; i < s.bHeap.Len(); i++ { bar := s.bHeap[i] table := bar.wSyncTable() pRow, aRow := table[0], table[1] for i, ch := range pRow { s.pMatrix[i] = append(s.pMatrix[i], ch) } for i, ch := range aRow { s.aMatrix[i] = append(s.aMatrix[i], ch) } } } func (s *pState) makeBarState(total int64, filler BarFiller, options ...BarOption) *bState { bs := &bState{ id: s.idCount, priority: s.idCount, reqWidth: s.reqWidth, total: total, filler: filler, extender: func(r io.Reader, _ int, _ decor.Statistics) (io.Reader, int) { return r, 0 }, debugOut: s.debugOut, } if total > 0 { bs.triggerComplete = true } for _, opt := range options { if opt != nil { opt(bs) } } if bs.middleware != nil { bs.filler = bs.middleware(filler) bs.middleware = nil } if s.popCompleted && !bs.noPop { bs.priority = -(math.MaxInt32 - s.idCount) } bs.bufP = bytes.NewBuffer(make([]byte, 0, 128)) bs.bufB = bytes.NewBuffer(make([]byte, 0, 256)) bs.bufA = bytes.NewBuffer(make([]byte, 0, 128)) return bs } func syncWidth(matrix map[int][]chan int) { for _, column := range matrix { go maxWidthDistributor(column) } } var maxWidthDistributor = func(column []chan int) { var maxWidth int for _, ch := range column { if w := <-ch; w > maxWidth { maxWidth = w } } for _, ch := range column { ch <- maxWidth } } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/internal/0000755000000000000000000000000015024302472023563 5ustar rootrootdependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/internal/percentage_test.go0000644000000000000000000000411215024302472027264 0ustar rootrootpackage internal import "testing" func TestPercentage(t *testing.T) { // key is barWidth testSuite := map[int][]struct { name string total int64 current int64 expected int64 }{ 100: { {"t,c,e{-1,-1,0}", -1, -1, 0}, {"t,c,e{0,-1,0}", 0, -1, 0}, {"t,c,e{0,0,0}", 0, 0, 0}, {"t,c,e{0,1,0}", 0, 1, 0}, {"t,c,e{100,0,0}", 100, 0, 0}, {"t,c,e{100,10,10}", 100, 10, 10}, {"t,c,e{100,15,15}", 100, 15, 15}, {"t,c,e{100,50,50}", 100, 50, 50}, {"t,c,e{100,99,99}", 100, 99, 99}, {"t,c,e{100,100,100}", 100, 100, 100}, {"t,c,e{100,101,101}", 100, 101, 100}, {"t,c,e{120,0,0}", 120, 0, 0}, {"t,c,e{120,10,8}", 120, 10, 8}, {"t,c,e{120,15,13}", 120, 15, 13}, {"t,c,e{120,50,42}", 120, 50, 42}, {"t,c,e{120,60,50}", 120, 60, 50}, {"t,c,e{120,99,83}", 120, 99, 83}, {"t,c,e{120,101,84}", 120, 101, 84}, {"t,c,e{120,118,98}", 120, 118, 98}, {"t,c,e{120,119,99}", 120, 119, 99}, {"t,c,e{120,120,100}", 120, 120, 100}, {"t,c,e{120,121,101}", 120, 121, 100}, }, 80: { {"t,c,e{-1,-1,0}", -1, -1, 0}, {"t,c,e{0,-1,0}", 0, -1, 0}, {"t,c,e{0,0,0}", 0, 0, 0}, {"t,c,e{0,1,0}", 0, 1, 0}, {"t,c,e{100,0,0}", 100, 0, 0}, {"t,c,e{100,10,8}", 100, 10, 8}, {"t,c,e{100,15,12}", 100, 15, 12}, {"t,c,e{100,50,40}", 100, 50, 40}, {"t,c,e{100,99,79}", 100, 99, 79}, {"t,c,e{100,100,80}", 100, 100, 80}, {"t,c,e{100,101,81}", 100, 101, 80}, {"t,c,e{120,0,0}", 120, 0, 0}, {"t,c,e{120,10,7}", 120, 10, 7}, {"t,c,e{120,15,10}", 120, 15, 10}, {"t,c,e{120,50,33}", 120, 50, 33}, {"t,c,e{120,60,40}", 120, 60, 40}, {"t,c,e{120,99,66}", 120, 99, 66}, {"t,c,e{120,101,67}", 120, 101, 67}, {"t,c,e{120,118,79}", 120, 118, 79}, 
{"t,c,e{120,119,79}", 120, 119, 79}, {"t,c,e{120,120,80}", 120, 120, 80}, {"t,c,e{120,121,81}", 120, 121, 80}, }, } for width, cases := range testSuite { for _, tc := range cases { got := int64(PercentageRound(tc.total, tc.current, width)) if got != tc.expected { t.Errorf("width %d; %s: Expected: %d, got: %d\n", width, tc.name, tc.expected, got) } } } } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/internal/width.go0000644000000000000000000000037315024302472025234 0ustar rootrootpackage internal // CheckRequestedWidth checks that requested width doesn't overflow // available width func CheckRequestedWidth(requested, available int) int { if requested <= 0 || requested >= available { return available } return requested } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/internal/percentage.go0000644000000000000000000000072715024302472026235 0ustar rootrootpackage internal import "math" // Percentage is a helper function, to calculate percentage. func Percentage(total, current int64, width int) float64 { if total <= 0 { return 0 } if current >= total { return float64(width) } return float64(int64(width)*current) / float64(total) } // PercentageRound same as Percentage but with math.Round. func PercentageRound(total, current int64, width int) float64 { return math.Round(Percentage(total, current, width)) } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/internal/predicate.go0000644000000000000000000000020615024302472026050 0ustar rootrootpackage internal // Predicate helper for internal use. func Predicate(pick bool) func() bool { return func() bool { return pick } } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/README.md0000644000000000000000000001004515024302472023226 0ustar rootroot# Multi Progress Bar [![GoDoc](https://pkg.go.dev/badge/github.com/vbauerster/mpb)](https://pkg.go.dev/github.com/vbauerster/mpb/v6) [![Build Status](https://travis-ci.org/vbauerster/mpb.svg?branch=master)](https://travis-ci.org/vbauerster/mpb) [![Go Report Card](https://goreportcard.com/badge/github.com/vbauerster/mpb)](https://goreportcard.com/report/github.com/vbauerster/mpb) **mpb** is a Go lib for rendering progress bars in terminal applications. 
## Features - **Multiple Bars**: Multiple progress bars are supported - **Dynamic Total**: Set total while bar is running - **Dynamic Add/Remove**: Dynamically add or remove bars - **Cancellation**: Cancel whole rendering process - **Predefined Decorators**: Elapsed time, [ewma](https://github.com/VividCortex/ewma) based ETA, Percentage, Bytes counter - **Decorator's width sync**: Synchronized decorator's width among multiple bars ## Usage #### [Rendering single bar](_examples/singleBar/main.go) ```go package main import ( "math/rand" "time" "github.com/vbauerster/mpb/v6" "github.com/vbauerster/mpb/v6/decor" ) func main() { // initialize progress container, with custom width p := mpb.New(mpb.WithWidth(64)) total := 100 name := "Single Bar:" // adding a single bar, which will inherit container's width bar := p.Add(int64(total), // progress bar filler with customized style mpb.NewBarFiller("╢▌▌░╟"), mpb.PrependDecorators( // display our name with one space on the right decor.Name(name, decor.WC{W: len(name) + 1, C: decor.DidentRight}), // replace ETA decorator with "done" message, OnComplete event decor.OnComplete( decor.AverageETA(decor.ET_STYLE_GO, decor.WC{W: 4}), "done", ), ), mpb.AppendDecorators(decor.Percentage()), ) // simulating some work max := 100 * time.Millisecond for i := 0; i < total; i++ { time.Sleep(time.Duration(rand.Intn(10)+1) * max / 10) bar.Increment() } // wait for our bar to complete and flush p.Wait() } ``` #### [Rendering multiple bars](_examples/multiBars/main.go) ```go var wg sync.WaitGroup // pass &wg (optional), so p will wait for it eventually p := mpb.New(mpb.WithWaitGroup(&wg)) total, numBars := 100, 3 wg.Add(numBars) for i := 0; i < numBars; i++ { name := fmt.Sprintf("Bar#%d:", i) bar := p.AddBar(int64(total), mpb.PrependDecorators( // simple name decorator decor.Name(name), // decor.DSyncWidth bit enables column width synchronization decor.Percentage(decor.WCSyncSpace), ), mpb.AppendDecorators( // replace ETA decorator with "done" message, OnComplete event decor.OnComplete( // ETA decorator with ewma age of 60 decor.EwmaETA(decor.ET_STYLE_GO, 60), "done", ), ), ) // simulating some work go func() { defer wg.Done() rng := rand.New(rand.NewSource(time.Now().UnixNano())) max := 100 * time.Millisecond for i := 0; i < total; i++ { // start variable is solely for EWMA calculation // EWMA's unit of measure is an iteration's duration start := time.Now() time.Sleep(time.Duration(rng.Intn(10)+1) * max / 10) bar.Increment() // we need to call DecoratorEwmaUpdate to fulfill ewma decorator's contract bar.DecoratorEwmaUpdate(time.Since(start)) } }() } // Waiting for passed &wg and for all bars to complete and flush p.Wait() ``` #### [Dynamic total](_examples/dynTotal/main.go) ![dynamic total](_svg/godEMrCZmJkHYH1X9dN4Nm0U7.svg) #### [Complex example](_examples/complex/main.go) ![complex](_svg/wHzf1M7sd7B3zVa2scBMnjqRf.svg) #### [Bytes counters](_examples/io/main.go) ![byte counters](_svg/hIpTa3A5rQz65ssiVuRJu87X6.svg) dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/bar_filler_spinner.go0000644000000000000000000000313715024302472026141 0ustar rootrootpackage mpb import ( "io" "strings" "github.com/mattn/go-runewidth" "github.com/vbauerster/mpb/v6/decor" "github.com/vbauerster/mpb/v6/internal" ) // SpinnerAlignment enum. type SpinnerAlignment int // SpinnerAlignment kinds. const ( SpinnerOnLeft SpinnerAlignment = iota SpinnerOnMiddle SpinnerOnRight ) // SpinnerDefaultStyle is a style for rendering a spinner. 
var SpinnerDefaultStyle = []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"} type spinnerFiller struct { frames []string count uint alignment SpinnerAlignment } // NewSpinnerFiller returns a BarFiller implementation which renders // a spinner. If style is nil or zero length, SpinnerDefaultStyle is // applied. To be used with `*Progress.Add(...) *Bar` method. func NewSpinnerFiller(style []string, alignment SpinnerAlignment) BarFiller { if len(style) == 0 { style = SpinnerDefaultStyle } filler := &spinnerFiller{ frames: style, alignment: alignment, } return filler } func (s *spinnerFiller) Fill(w io.Writer, reqWidth int, stat decor.Statistics) { width := internal.CheckRequestedWidth(reqWidth, stat.AvailableWidth) frame := s.frames[s.count%uint(len(s.frames))] frameWidth := runewidth.StringWidth(frame) if width < frameWidth { return } switch rest := width - frameWidth; s.alignment { case SpinnerOnLeft: io.WriteString(w, frame+strings.Repeat(" ", rest)) case SpinnerOnMiddle: str := strings.Repeat(" ", rest/2) + frame + strings.Repeat(" ", rest/2+rest%2) io.WriteString(w, str) case SpinnerOnRight: io.WriteString(w, strings.Repeat(" ", rest)+frame) } s.count++ } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/bar_filler.go0000644000000000000000000000164315024302472024403 0ustar rootrootpackage mpb import ( "io" "github.com/vbauerster/mpb/v6/decor" ) // BarFiller interface. // Bar (without decorators) renders itself by calling BarFiller's Fill method. // // reqWidth is requested width, set by `func WithWidth(int) ContainerOption`. // If not set, it defaults to terminal width. // // Default implementations can be obtained via: // // func NewBarFiller(style string) BarFiller // func NewBarFillerRev(style string) BarFiller // func NewBarFillerPick(style string, rev bool) BarFiller // func NewSpinnerFiller(style []string, alignment SpinnerAlignment) BarFiller // type BarFiller interface { Fill(w io.Writer, reqWidth int, stat decor.Statistics) } // BarFillerFunc is function type adapter to convert function into BarFiller. type BarFillerFunc func(w io.Writer, reqWidth int, stat decor.Statistics) func (f BarFillerFunc) Fill(w io.Writer, reqWidth int, stat decor.Statistics) { f(w, reqWidth, stat) } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/.travis.yml0000644000000000000000000000023115024302472024054 0ustar rootrootlanguage: go arch: - amd64 - ppc64le go: - 1.14.x script: - go test -race ./... 
- for i in _examples/*/; do go build $i/*.go || exit 1; done dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/bar_test.go0000644000000000000000000001176115024302472024107 0ustar rootrootpackage mpb_test import ( "bytes" "fmt" "io/ioutil" "strings" "sync/atomic" "testing" "time" "unicode/utf8" "github.com/vbauerster/mpb/v6" "github.com/vbauerster/mpb/v6/decor" ) func TestBarCompleted(t *testing.T) { p := mpb.New(mpb.WithWidth(80), mpb.WithOutput(ioutil.Discard)) total := 80 bar := p.AddBar(int64(total)) var count int for !bar.Completed() { time.Sleep(10 * time.Millisecond) bar.Increment() count++ } p.Wait() if count != total { t.Errorf("got count: %d, expected %d\n", count, total) } } func TestBarID(t *testing.T) { p := mpb.New(mpb.WithWidth(80), mpb.WithOutput(ioutil.Discard)) total := 100 wantID := 11 bar := p.AddBar(int64(total), mpb.BarID(wantID)) go func() { for i := 0; i < total; i++ { time.Sleep(50 * time.Millisecond) bar.Increment() } }() gotID := bar.ID() if gotID != wantID { t.Errorf("Expected bar id: %d, got %d\n", wantID, gotID) } bar.Abort(true) p.Wait() } func TestBarSetRefill(t *testing.T) { var buf bytes.Buffer p := mpb.New(mpb.WithOutput(&buf), mpb.WithWidth(100)) total := 100 till := 30 refillRune, _ := utf8.DecodeLastRuneInString(mpb.BarDefaultStyle) bar := p.AddBar(int64(total), mpb.BarFillerTrim()) bar.SetRefill(int64(till)) bar.IncrBy(till) for i := 0; i < total-till; i++ { bar.Increment() time.Sleep(10 * time.Millisecond) } p.Wait() wantBar := fmt.Sprintf("[%s%s]", strings.Repeat(string(refillRune), till-1), strings.Repeat("=", total-till-1), ) got := string(getLastLine(buf.Bytes())) if !strings.Contains(got, wantBar) { t.Errorf("Want bar: %q, got bar: %q\n", wantBar, got) } } func TestBarHas100PercentWithOnCompleteDecorator(t *testing.T) { var buf bytes.Buffer p := mpb.New(mpb.WithWidth(80), mpb.WithOutput(&buf)) total := 50 bar := p.AddBar(int64(total), mpb.AppendDecorators( decor.OnComplete( decor.Percentage(), "done", ), ), ) for i := 0; i < total; i++ { bar.Increment() time.Sleep(10 * time.Millisecond) } p.Wait() hundred := "100 %" if !bytes.Contains(buf.Bytes(), []byte(hundred)) { t.Errorf("Bar's buffer does not contain: %q\n", hundred) } } func TestBarHas100PercentWithBarRemoveOnComplete(t *testing.T) { var buf bytes.Buffer p := mpb.New(mpb.WithWidth(80), mpb.WithOutput(&buf)) total := 50 bar := p.AddBar(int64(total), mpb.BarRemoveOnComplete(), mpb.AppendDecorators(decor.Percentage()), ) for i := 0; i < total; i++ { bar.Increment() time.Sleep(10 * time.Millisecond) } p.Wait() hundred := "100 %" if !bytes.Contains(buf.Bytes(), []byte(hundred)) { t.Errorf("Bar's buffer does not contain: %q\n", hundred) } } func TestBarStyle(t *testing.T) { var buf bytes.Buffer customFormat := "╢▌▌░╟" total := 80 p := mpb.New(mpb.WithWidth(total), mpb.WithOutput(&buf)) bar := p.Add(int64(total), mpb.NewBarFiller(customFormat), mpb.BarFillerTrim()) for i := 0; i < total; i++ { bar.Increment() time.Sleep(10 * time.Millisecond) } p.Wait() runes := []rune(customFormat) wantBar := fmt.Sprintf("%s%s%s", string(runes[0]), strings.Repeat(string(runes[1]), total-2), string(runes[len(runes)-1]), ) got := string(getLastLine(buf.Bytes())) if !strings.Contains(got, wantBar) { t.Errorf("Want bar: %q:%d, got bar: %q:%d\n", wantBar, utf8.RuneCountInString(wantBar), got, utf8.RuneCountInString(got)) } } func TestBarPanicBeforeComplete(t *testing.T) { var buf bytes.Buffer p := mpb.New( mpb.WithWidth(80), mpb.WithDebugOutput(&buf), mpb.WithOutput(ioutil.Discard), ) total := 100 panicMsg := 
"Upps!!!" var pCount uint32 bar := p.AddBar(int64(total), mpb.PrependDecorators(panicDecorator(panicMsg, func(st decor.Statistics) bool { if st.Current >= 42 { atomic.AddUint32(&pCount, 1) return true } return false }, )), ) for i := 0; i < total; i++ { time.Sleep(10 * time.Millisecond) bar.Increment() } p.Wait() if pCount != 1 { t.Errorf("Decor called after panic %d times\n", pCount-1) } barStr := buf.String() if !strings.Contains(barStr, panicMsg) { t.Errorf("%q doesn't contain %q\n", barStr, panicMsg) } } func TestBarPanicAfterComplete(t *testing.T) { var buf bytes.Buffer p := mpb.New( mpb.WithWidth(80), mpb.WithDebugOutput(&buf), mpb.WithOutput(ioutil.Discard), ) total := 100 panicMsg := "Upps!!!" var pCount uint32 bar := p.AddBar(int64(total), mpb.PrependDecorators(panicDecorator(panicMsg, func(st decor.Statistics) bool { if st.Completed { atomic.AddUint32(&pCount, 1) return true } return false }, )), ) for i := 0; i < total; i++ { time.Sleep(10 * time.Millisecond) bar.Increment() } p.Wait() if pCount > 2 { t.Error("Decor called after panic more than 2 times\n") } barStr := buf.String() if !strings.Contains(barStr, panicMsg) { t.Errorf("%q doesn't contain %q\n", barStr, panicMsg) } } func panicDecorator(panicMsg string, cond func(decor.Statistics) bool) decor.Decorator { return decor.Any(func(st decor.Statistics) string { if cond(st) { panic(panicMsg) } return "" }) } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/example_test.go0000644000000000000000000000377215024302472025001 0ustar rootrootpackage mpb_test import ( crand "crypto/rand" "io" "io/ioutil" "math/rand" "time" "github.com/vbauerster/mpb/v6" "github.com/vbauerster/mpb/v6/decor" ) func Example() { // initialize progress container, with custom width p := mpb.New(mpb.WithWidth(64)) total := 100 name := "Single Bar:" // adding a single bar, which will inherit container's width bar := p.Add(int64(total), // progress bar filler with customized style mpb.NewBarFiller("╢▌▌░╟"), mpb.PrependDecorators( // display our name with one space on the right decor.Name(name, decor.WC{W: len(name) + 1, C: decor.DidentRight}), // replace ETA decorator with "done" message, OnComplete event decor.OnComplete( // ETA decorator with ewma age of 60, and width reservation of 4 decor.EwmaETA(decor.ET_STYLE_GO, 60, decor.WC{W: 4}), "done", ), ), mpb.AppendDecorators(decor.Percentage()), ) // simulating some work max := 100 * time.Millisecond for i := 0; i < total; i++ { // start variable is solely for EWMA calculation // EWMA's unit of measure is an iteration's duration start := time.Now() time.Sleep(time.Duration(rand.Intn(10)+1) * max / 10) bar.Increment() // we need to call DecoratorEwmaUpdate to fulfill ewma decorator's contract bar.DecoratorEwmaUpdate(time.Since(start)) } // wait for our bar to complete and flush p.Wait() } func ExampleBar_Completed() { p := mpb.New() bar := p.AddBar(100) max := 100 * time.Millisecond for !bar.Completed() { time.Sleep(time.Duration(rand.Intn(10)+1) * max / 10) bar.Increment() } p.Wait() } func ExampleBar_ProxyReader() { // import crand "crypto/rand" var total int64 = 1024 * 1024 * 500 reader := io.LimitReader(crand.Reader, total) p := mpb.New() bar := p.AddBar(total, mpb.AppendDecorators( decor.CountersKibiByte("% .2f / % .2f"), ), ) // create proxy reader proxyReader := bar.ProxyReader(reader) defer proxyReader.Close() // and copy from reader, ignoring errors io.Copy(ioutil.Discard, proxyReader) p.Wait() } 
dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/UNLICENSE0000644000000000000000000000227315024302472023223 0ustar rootrootThis is free and unencumbered software released into the public domain. Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. For more information, please refer to dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/go.sum0000644000000000000000000000202015024302472023074 0ustar rootrootgithub.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/mattn/go-runewidth v0.0.12 h1:Y41i/hVW3Pgwr8gV+J23B9YEY0zxjptBuCWEaxmAOow= github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015 h1:hZR0X1kPW+nwyJ9xRxqZk1vx5RUObAPBdKVvXPDUH/E= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/bar_filler_bar.go0000644000000000000000000001024115024302472025221 0ustar rootrootpackage mpb import ( "bytes" "io" "unicode/utf8" "github.com/mattn/go-runewidth" "github.com/rivo/uniseg" "github.com/vbauerster/mpb/v6/decor" "github.com/vbauerster/mpb/v6/internal" ) const ( rLeft = iota rFill rTip rSpace rRight rRevTip rRefill ) // BarDefaultStyle is a style for rendering a progress bar. 
// It consist of 7 ordered runes: // // '1st rune' stands for left boundary rune // // '2nd rune' stands for fill rune // // '3rd rune' stands for tip rune // // '4th rune' stands for space rune // // '5th rune' stands for right boundary rune // // '6th rune' stands for reverse tip rune // // '7th rune' stands for refill rune // const BarDefaultStyle string = "[=>-]<+" type barFiller struct { format [][]byte rwidth []int tip []byte refill int64 reverse bool flush func(io.Writer, *space, [][]byte) } type space struct { space []byte rwidth int count int } // NewBarFiller returns a BarFiller implementation which renders a // progress bar in regular direction. If style is empty string, // BarDefaultStyle is applied. To be used with `*Progress.Add(...) // *Bar` method. func NewBarFiller(style string) BarFiller { return newBarFiller(style, false) } // NewBarFillerRev returns a BarFiller implementation which renders a // progress bar in reverse direction. If style is empty string, // BarDefaultStyle is applied. To be used with `*Progress.Add(...) // *Bar` method. func NewBarFillerRev(style string) BarFiller { return newBarFiller(style, true) } // NewBarFillerPick pick between regular and reverse BarFiller implementation // based on rev param. To be used with `*Progress.Add(...) *Bar` method. func NewBarFillerPick(style string, rev bool) BarFiller { return newBarFiller(style, rev) } func newBarFiller(style string, rev bool) BarFiller { bf := &barFiller{ format: make([][]byte, len(BarDefaultStyle)), rwidth: make([]int, len(BarDefaultStyle)), reverse: rev, } bf.parse(BarDefaultStyle) if style != "" && style != BarDefaultStyle { bf.parse(style) } return bf } func (s *barFiller) parse(style string) { if !utf8.ValidString(style) { panic("invalid bar style") } srcFormat := make([][]byte, len(BarDefaultStyle)) srcRwidth := make([]int, len(BarDefaultStyle)) i := 0 for gr := uniseg.NewGraphemes(style); i < len(BarDefaultStyle) && gr.Next(); i++ { srcFormat[i] = gr.Bytes() srcRwidth[i] = runewidth.StringWidth(gr.Str()) } copy(s.format, srcFormat[:i]) copy(s.rwidth, srcRwidth[:i]) if s.reverse { s.tip = s.format[rRevTip] s.flush = reverseFlush } else { s.tip = s.format[rTip] s.flush = regularFlush } } func (s *barFiller) Fill(w io.Writer, reqWidth int, stat decor.Statistics) { width := internal.CheckRequestedWidth(reqWidth, stat.AvailableWidth) brackets := s.rwidth[rLeft] + s.rwidth[rRight] if width < brackets { return } // don't count brackets as progress width -= brackets w.Write(s.format[rLeft]) defer w.Write(s.format[rRight]) cwidth := int(internal.PercentageRound(stat.Total, stat.Current, width)) space := &space{ space: s.format[rSpace], rwidth: s.rwidth[rSpace], count: width - cwidth, } index, refill := 0, 0 bb := make([][]byte, cwidth) if cwidth > 0 && cwidth != width { bb[index] = s.tip cwidth -= s.rwidth[rTip] index++ } if stat.Refill > 0 { refill = int(internal.PercentageRound(stat.Total, int64(stat.Refill), width)) if refill > cwidth { refill = cwidth } cwidth -= refill } for cwidth > 0 { bb[index] = s.format[rFill] cwidth -= s.rwidth[rFill] index++ } for refill > 0 { bb[index] = s.format[rRefill] refill -= s.rwidth[rRefill] index++ } if cwidth+refill < 0 || space.rwidth > 1 { buf := new(bytes.Buffer) s.flush(buf, space, bb[:index]) io.WriteString(w, runewidth.Truncate(buf.String(), width, "…")) return } s.flush(w, space, bb) } func regularFlush(w io.Writer, space *space, bb [][]byte) { for i := len(bb) - 1; i >= 0; i-- { w.Write(bb[i]) } for space.count > 0 { w.Write(space.space) space.count -= 
space.rwidth } } func reverseFlush(w io.Writer, space *space, bb [][]byte) { for space.count > 0 { w.Write(space.space) space.count -= space.rwidth } for i := 0; i < len(bb); i++ { w.Write(bb[i]) } } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/proxyreader.go0000644000000000000000000000330215024302472024640 0ustar rootrootpackage mpb import ( "io" "io/ioutil" "time" ) type proxyReader struct { io.ReadCloser bar *Bar } func (x *proxyReader) Read(p []byte) (int, error) { n, err := x.ReadCloser.Read(p) x.bar.IncrBy(n) if err == io.EOF { go x.bar.SetTotal(0, true) } return n, err } type proxyWriterTo struct { io.ReadCloser // *proxyReader wt io.WriterTo bar *Bar } func (x *proxyWriterTo) WriteTo(w io.Writer) (int64, error) { n, err := x.wt.WriteTo(w) x.bar.IncrInt64(n) if err == io.EOF { go x.bar.SetTotal(0, true) } return n, err } type ewmaProxyReader struct { io.ReadCloser // *proxyReader bar *Bar iT time.Time } func (x *ewmaProxyReader) Read(p []byte) (int, error) { n, err := x.ReadCloser.Read(p) if n > 0 { x.bar.DecoratorEwmaUpdate(time.Since(x.iT)) x.iT = time.Now() } return n, err } type ewmaProxyWriterTo struct { io.ReadCloser // *ewmaProxyReader wt io.WriterTo // *proxyWriterTo bar *Bar iT time.Time } func (x *ewmaProxyWriterTo) WriteTo(w io.Writer) (int64, error) { n, err := x.wt.WriteTo(w) if n > 0 { x.bar.DecoratorEwmaUpdate(time.Since(x.iT)) x.iT = time.Now() } return n, err } func newProxyReader(r io.Reader, bar *Bar) io.ReadCloser { rc := toReadCloser(r) rc = &proxyReader{rc, bar} if wt, isWriterTo := r.(io.WriterTo); bar.hasEwmaDecorators { now := time.Now() rc = &ewmaProxyReader{rc, bar, now} if isWriterTo { rc = &ewmaProxyWriterTo{rc, wt, bar, now} } } else if isWriterTo { rc = &proxyWriterTo{rc, wt, bar} } return rc } func toReadCloser(r io.Reader) io.ReadCloser { if rc, ok := r.(io.ReadCloser); ok { return rc } return ioutil.NopCloser(r) } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/.gitignore0000644000000000000000000000017315024302472023740 0ustar rootroot# Test binary, build with `go test -c` *.test # Output of the go coverage tool, specifically when used with LiteIDE *.out dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/proxyreader_test.go0000644000000000000000000000400215024302472025675 0ustar rootrootpackage mpb_test import ( "bytes" "io" "io/ioutil" "strings" "testing" "github.com/vbauerster/mpb/v6" ) const content = `Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. 
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.` type testReader struct { io.Reader called bool } func (r *testReader) Read(p []byte) (n int, err error) { r.called = true return r.Reader.Read(p) } func TestProxyReader(t *testing.T) { p := mpb.New(mpb.WithOutput(ioutil.Discard)) tReader := &testReader{strings.NewReader(content), false} bar := p.AddBar(int64(len(content)), mpb.BarFillerTrim()) var buf bytes.Buffer _, err := io.Copy(&buf, bar.ProxyReader(tReader)) if err != nil { t.Errorf("Error copying from reader: %+v\n", err) } p.Wait() if !tReader.called { t.Error("Read not called") } if got := buf.String(); got != content { t.Errorf("Expected content: %s, got: %s\n", content, got) } } type testWriterTo struct { io.Reader wt io.WriterTo called bool } func (wt *testWriterTo) WriteTo(w io.Writer) (n int64, err error) { wt.called = true return wt.wt.WriteTo(w) } func TestProxyWriterTo(t *testing.T) { p := mpb.New(mpb.WithOutput(ioutil.Discard)) var reader io.Reader = strings.NewReader(content) wt := reader.(io.WriterTo) tReader := &testWriterTo{reader, wt, false} bar := p.AddBar(int64(len(content)), mpb.BarFillerTrim()) var buf bytes.Buffer _, err := io.Copy(&buf, bar.ProxyReader(tReader)) if err != nil { t.Errorf("Error copying from reader: %+v\n", err) } p.Wait() if !tReader.called { t.Error("WriteTo not called") } if got := buf.String(); got != content { t.Errorf("Expected content: %s, got: %s\n", content, got) } } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/container_option.go0000644000000000000000000000602415024302472025652 0ustar rootrootpackage mpb import ( "io" "io/ioutil" "sync" "time" "github.com/vbauerster/mpb/v6/internal" ) // ContainerOption is a func option to alter default behavior of a bar // container. Container term refers to a Progress struct which can // hold one or more Bars. type ContainerOption func(*pState) // WithWaitGroup provides means to have a single joint point. If // *sync.WaitGroup is provided, you can safely call just p.Wait() // without calling Wait() on provided *sync.WaitGroup. Makes sense // when there are more than one bar to render. func WithWaitGroup(wg *sync.WaitGroup) ContainerOption { return func(s *pState) { s.uwg = wg } } // WithWidth sets container width. If not set it defaults to terminal // width. A bar added to the container will inherit its width, unless // overridden by `func BarWidth(int) BarOption`. func WithWidth(width int) ContainerOption { return func(s *pState) { s.reqWidth = width } } // WithRefreshRate overrides default 120ms refresh rate. func WithRefreshRate(d time.Duration) ContainerOption { return func(s *pState) { s.rr = d } } // WithManualRefresh disables internal auto refresh time.Ticker. // Refresh will occur upon receive value from provided ch. func WithManualRefresh(ch <-chan interface{}) ContainerOption { return func(s *pState) { s.externalRefresh = ch } } // WithRenderDelay delays rendering. By default rendering starts as // soon as bar is added, with this option it's possible to delay // rendering process by keeping provided chan unclosed. In other words // rendering will start as soon as provided chan is closed. func WithRenderDelay(ch <-chan struct{}) ContainerOption { return func(s *pState) { s.renderDelay = ch } } // WithShutdownNotifier provided chanel will be closed, after all bars // have been rendered. 
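// An illustrative (not upstream) sketch of consuming the notification; the
// channel name and the printed message are arbitrary:
//
//	done := make(chan struct{})
//	p := mpb.New(mpb.WithShutdownNotifier(done))
//	go func() {
//		<-done
//		fmt.Println("all bars have been rendered")
//	}()
//	// add bars, do the work, then:
//	p.Wait()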
func WithShutdownNotifier(ch chan struct{}) ContainerOption { return func(s *pState) { s.shutdownNotifier = ch } } // WithOutput overrides default os.Stdout output. Setting it to nil // will effectively disable auto refresh rate and discard any output, // useful if you want to disable progress bars with little overhead. func WithOutput(w io.Writer) ContainerOption { return func(s *pState) { if w == nil { s.output = ioutil.Discard s.outputDiscarded = true return } s.output = w } } // WithDebugOutput sets debug output. func WithDebugOutput(w io.Writer) ContainerOption { if w == nil { return nil } return func(s *pState) { s.debugOut = w } } // PopCompletedMode will pop and stop rendering completed bars. func PopCompletedMode() ContainerOption { return func(s *pState) { s.popCompleted = true } } // ContainerOptional will invoke provided option only when pick is true. func ContainerOptional(option ContainerOption, pick bool) ContainerOption { return ContainerOptOn(option, internal.Predicate(pick)) } // ContainerOptOn will invoke provided option only when higher order // predicate evaluates to true. func ContainerOptOn(option ContainerOption, predicate func() bool) ContainerOption { if predicate() { return option } return nil } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/decor/0000755000000000000000000000000015024302472023043 5ustar rootrootdependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/decor/size_type_test.go0000644000000000000000000000743515024302472026455 0ustar rootrootpackage decor import ( "fmt" "testing" ) func TestB1024(t *testing.T) { cases := map[string]struct { value int64 verb string expected string }{ "verb %f": {12345678, "%f", "11.773756MiB"}, "verb %.0f": {12345678, "%.0f", "12MiB"}, "verb %.1f": {12345678, "%.1f", "11.8MiB"}, "verb %.2f": {12345678, "%.2f", "11.77MiB"}, "verb %.3f": {12345678, "%.3f", "11.774MiB"}, "verb % f": {12345678, "% f", "11.773756 MiB"}, "verb % .0f": {12345678, "% .0f", "12 MiB"}, "verb % .1f": {12345678, "% .1f", "11.8 MiB"}, "verb % .2f": {12345678, "% .2f", "11.77 MiB"}, "verb % .3f": {12345678, "% .3f", "11.774 MiB"}, "1000 %f": {1000, "%f", "1000.000000b"}, "1000 %d": {1000, "%d", "1000b"}, "1000 %s": {1000, "%s", "1000b"}, "1024 %f": {1024, "%f", "1.000000KiB"}, "1024 %d": {1024, "%d", "1KiB"}, "1024 %.1f": {1024, "%.1f", "1.0KiB"}, "1024 %s": {1024, "%s", "1KiB"}, "3*MiB+140KiB %f": {3*int64(_iMiB) + 140*int64(_iKiB), "%f", "3.136719MiB"}, "3*MiB+140KiB %d": {3*int64(_iMiB) + 140*int64(_iKiB), "%d", "3MiB"}, "3*MiB+140KiB %.1f": {3*int64(_iMiB) + 140*int64(_iKiB), "%.1f", "3.1MiB"}, "3*MiB+140KiB %s": {3*int64(_iMiB) + 140*int64(_iKiB), "%s", "3.13671875MiB"}, "2*GiB %f": {2 * int64(_iGiB), "%f", "2.000000GiB"}, "2*GiB %d": {2 * int64(_iGiB), "%d", "2GiB"}, "2*GiB %.1f": {2 * int64(_iGiB), "%.1f", "2.0GiB"}, "2*GiB %s": {2 * int64(_iGiB), "%s", "2GiB"}, "4*TiB %f": {4 * int64(_iTiB), "%f", "4.000000TiB"}, "4*TiB %d": {4 * int64(_iTiB), "%d", "4TiB"}, "4*TiB %.1f": {4 * int64(_iTiB), "%.1f", "4.0TiB"}, "4*TiB %s": {4 * int64(_iTiB), "%s", "4TiB"}, } for name, tc := range cases { t.Run(name, func(t *testing.T) { got := fmt.Sprintf(tc.verb, SizeB1024(tc.value)) if got != tc.expected { t.Fatalf("expected: %q, got: %q\n", tc.expected, got) } }) } } func TestB1000(t *testing.T) { cases := map[string]struct { value int64 verb string expected string }{ "verb %f": {12345678, "%f", "12.345678MB"}, "verb %.0f": {12345678, "%.0f", "12MB"}, "verb %.1f": {12345678, "%.1f", "12.3MB"}, "verb %.2f": {12345678, "%.2f", "12.35MB"}, "verb 
%.3f": {12345678, "%.3f", "12.346MB"}, "verb % f": {12345678, "% f", "12.345678 MB"}, "verb % .0f": {12345678, "% .0f", "12 MB"}, "verb % .1f": {12345678, "% .1f", "12.3 MB"}, "verb % .2f": {12345678, "% .2f", "12.35 MB"}, "verb % .3f": {12345678, "% .3f", "12.346 MB"}, "1000 %f": {1000, "%f", "1.000000KB"}, "1000 %d": {1000, "%d", "1KB"}, "1000 %s": {1000, "%s", "1KB"}, "1024 %f": {1024, "%f", "1.024000KB"}, "1024 %d": {1024, "%d", "1KB"}, "1024 %.1f": {1024, "%.1f", "1.0KB"}, "1024 %s": {1024, "%s", "1.024KB"}, "3*MB+140*KB %f": {3*int64(_MB) + 140*int64(_KB), "%f", "3.140000MB"}, "3*MB+140*KB %d": {3*int64(_MB) + 140*int64(_KB), "%d", "3MB"}, "3*MB+140*KB %.1f": {3*int64(_MB) + 140*int64(_KB), "%.1f", "3.1MB"}, "3*MB+140*KB %s": {3*int64(_MB) + 140*int64(_KB), "%s", "3.14MB"}, "2*GB %f": {2 * int64(_GB), "%f", "2.000000GB"}, "2*GB %d": {2 * int64(_GB), "%d", "2GB"}, "2*GB %.1f": {2 * int64(_GB), "%.1f", "2.0GB"}, "2*GB %s": {2 * int64(_GB), "%s", "2GB"}, "4*TB %f": {4 * int64(_TB), "%f", "4.000000TB"}, "4*TB %d": {4 * int64(_TB), "%d", "4TB"}, "4*TB %.1f": {4 * int64(_TB), "%.1f", "4.0TB"}, "4*TB %s": {4 * int64(_TB), "%s", "4TB"}, } for name, tc := range cases { t.Run(name, func(t *testing.T) { got := fmt.Sprintf(tc.verb, SizeB1000(tc.value)) if got != tc.expected { t.Fatalf("expected: %q, got: %q\n", tc.expected, got) } }) } } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/decor/moving_average.go0000644000000000000000000000270115024302472026363 0ustar rootrootpackage decor import ( "sort" "sync" "github.com/VividCortex/ewma" ) type threadSafeMovingAverage struct { ewma.MovingAverage mu sync.Mutex } func (s *threadSafeMovingAverage) Add(value float64) { s.mu.Lock() s.MovingAverage.Add(value) s.mu.Unlock() } func (s *threadSafeMovingAverage) Value() float64 { s.mu.Lock() defer s.mu.Unlock() return s.MovingAverage.Value() } func (s *threadSafeMovingAverage) Set(value float64) { s.mu.Lock() s.MovingAverage.Set(value) s.mu.Unlock() } // NewThreadSafeMovingAverage converts provided ewma.MovingAverage // into thread safe ewma.MovingAverage. func NewThreadSafeMovingAverage(average ewma.MovingAverage) ewma.MovingAverage { if tsma, ok := average.(*threadSafeMovingAverage); ok { return tsma } return &threadSafeMovingAverage{MovingAverage: average} } type medianWindow [3]float64 func (s *medianWindow) Len() int { return len(s) } func (s *medianWindow) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s *medianWindow) Less(i, j int) bool { return s[i] < s[j] } func (s *medianWindow) Add(value float64) { s[0], s[1] = s[1], s[2] s[2] = value } func (s *medianWindow) Value() float64 { tmp := *s sort.Sort(&tmp) return tmp[1] } func (s *medianWindow) Set(value float64) { for i := 0; i < len(s); i++ { s[i] = value } } // NewMedian is fixed last 3 samples median MovingAverage. func NewMedian() ewma.MovingAverage { return NewThreadSafeMovingAverage(new(medianWindow)) } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/decor/speed.go0000644000000000000000000001027515024302472024477 0ustar rootrootpackage decor import ( "fmt" "io" "math" "time" "github.com/VividCortex/ewma" ) // FmtAsSpeed adds "/s" to the end of the input formatter. 
To be // used with SizeB1000 or SizeB1024 types, for example: // // fmt.Printf("%.1f", FmtAsSpeed(SizeB1024(2048))) // func FmtAsSpeed(input fmt.Formatter) fmt.Formatter { return &speedFormatter{input} } type speedFormatter struct { fmt.Formatter } func (self *speedFormatter) Format(st fmt.State, verb rune) { self.Formatter.Format(st, verb) io.WriteString(st, "/s") } // EwmaSpeed exponential-weighted-moving-average based speed decorator. // For this decorator to work correctly you have to measure each // iteration's duration and pass it to the // *Bar.DecoratorEwmaUpdate(time.Duration) method after each increment. func EwmaSpeed(unit int, format string, age float64, wcc ...WC) Decorator { var average ewma.MovingAverage if age == 0 { average = ewma.NewMovingAverage() } else { average = ewma.NewMovingAverage(age) } return MovingAverageSpeed(unit, format, NewThreadSafeMovingAverage(average), wcc...) } // MovingAverageSpeed decorator relies on MovingAverage implementation // to calculate its average. // // `unit` one of [0|UnitKiB|UnitKB] zero for no unit // // `format` printf compatible verb for value, like "%f" or "%d" // // `average` MovingAverage implementation // // `wcc` optional WC config // // format examples: // // unit=UnitKiB, format="%.1f" output: "1.0MiB/s" // unit=UnitKiB, format="% .1f" output: "1.0 MiB/s" // unit=UnitKB, format="%.1f" output: "1.0MB/s" // unit=UnitKB, format="% .1f" output: "1.0 MB/s" // func MovingAverageSpeed(unit int, format string, average ewma.MovingAverage, wcc ...WC) Decorator { if format == "" { format = "%.0f" } d := &movingAverageSpeed{ WC: initWC(wcc...), average: average, producer: chooseSpeedProducer(unit, format), } return d } type movingAverageSpeed struct { WC producer func(float64) string average ewma.MovingAverage msg string } func (d *movingAverageSpeed) Decor(s Statistics) string { if !s.Completed { var speed float64 if v := d.average.Value(); v > 0 { speed = 1 / v } d.msg = d.producer(speed * 1e9) } return d.FormatMsg(d.msg) } func (d *movingAverageSpeed) EwmaUpdate(n int64, dur time.Duration) { durPerByte := float64(dur) / float64(n) if math.IsInf(durPerByte, 0) || math.IsNaN(durPerByte) { return } d.average.Add(durPerByte) } // AverageSpeed decorator with dynamic unit measure adjustment. It's // a wrapper of NewAverageSpeed. func AverageSpeed(unit int, format string, wcc ...WC) Decorator { return NewAverageSpeed(unit, format, time.Now(), wcc...) } // NewAverageSpeed decorator with dynamic unit measure adjustment and // user provided start time. 
// // `unit` one of [0|UnitKiB|UnitKB] zero for no unit // // `format` printf compatible verb for value, like "%f" or "%d" // // `startTime` start time // // `wcc` optional WC config // // format examples: // // unit=UnitKiB, format="%.1f" output: "1.0MiB/s" // unit=UnitKiB, format="% .1f" output: "1.0 MiB/s" // unit=UnitKB, format="%.1f" output: "1.0MB/s" // unit=UnitKB, format="% .1f" output: "1.0 MB/s" // func NewAverageSpeed(unit int, format string, startTime time.Time, wcc ...WC) Decorator { if format == "" { format = "%.0f" } d := &averageSpeed{ WC: initWC(wcc...), startTime: startTime, producer: chooseSpeedProducer(unit, format), } return d } type averageSpeed struct { WC startTime time.Time producer func(float64) string msg string } func (d *averageSpeed) Decor(s Statistics) string { if !s.Completed { speed := float64(s.Current) / float64(time.Since(d.startTime)) d.msg = d.producer(speed * 1e9) } return d.FormatMsg(d.msg) } func (d *averageSpeed) AverageAdjust(startTime time.Time) { d.startTime = startTime } func chooseSpeedProducer(unit int, format string) func(float64) string { switch unit { case UnitKiB: return func(speed float64) string { return fmt.Sprintf(format, FmtAsSpeed(SizeB1024(math.Round(speed)))) } case UnitKB: return func(speed float64) string { return fmt.Sprintf(format, FmtAsSpeed(SizeB1000(math.Round(speed)))) } default: return func(speed float64) string { return fmt.Sprintf(format, speed) } } } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/decor/name.go0000644000000000000000000000044015024302472024310 0ustar rootrootpackage decor // Name decorator displays text that is set once and can't be changed // during decorator's lifetime. // // `str` string to display // // `wcc` optional WC config // func Name(str string, wcc ...WC) Decorator { return Any(func(Statistics) string { return str }, wcc...) } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/decor/sizeb1024_string.go0000644000000000000000000000161015024302472026401 0ustar rootroot// Code generated by "stringer -type=SizeB1024 -trimprefix=_i"; DO NOT EDIT. package decor import "strconv" func _() { // An "invalid array index" compiler error signifies that the constant values have changed. // Re-run the stringer command to generate them again. var x [1]struct{} _ = x[_ib-1] _ = x[_iKiB-1024] _ = x[_iMiB-1048576] _ = x[_iGiB-1073741824] _ = x[_iTiB-1099511627776] } const ( _SizeB1024_name_0 = "b" _SizeB1024_name_1 = "KiB" _SizeB1024_name_2 = "MiB" _SizeB1024_name_3 = "GiB" _SizeB1024_name_4 = "TiB" ) func (i SizeB1024) String() string { switch { case i == 1: return _SizeB1024_name_0 case i == 1024: return _SizeB1024_name_1 case i == 1048576: return _SizeB1024_name_2 case i == 1073741824: return _SizeB1024_name_3 case i == 1099511627776: return _SizeB1024_name_4 default: return "SizeB1024(" + strconv.FormatInt(int64(i), 10) + ")" } } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/decor/size_type.go0000644000000000000000000000372015024302472025407 0ustar rootrootpackage decor import ( "fmt" "io" "math" "strconv" ) //go:generate stringer -type=SizeB1024 -trimprefix=_i //go:generate stringer -type=SizeB1000 -trimprefix=_ const ( _ib SizeB1024 = iota + 1 _iKiB SizeB1024 = 1 << (iota * 10) _iMiB _iGiB _iTiB ) // SizeB1024 named type, which implements fmt.Formatter interface. It // adjusts its value according to byte size multiple by 1024 and appends // appropriate size marker (KiB, MiB, GiB, TiB). 
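// Formatting behaviour, as exercised by size_type_test.go in this package:
//
//	fmt.Sprintf("%.1f", decor.SizeB1024(12345678))  // "11.8MiB"
//	fmt.Sprintf("% .2f", decor.SizeB1024(12345678)) // "11.77 MiB"
//	fmt.Sprintf("%d", decor.SizeB1024(1024))        // "1KiB"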
type SizeB1024 int64 func (self SizeB1024) Format(st fmt.State, verb rune) { var prec int switch verb { case 'd': case 's': prec = -1 default: if p, ok := st.Precision(); ok { prec = p } else { prec = 6 } } var unit SizeB1024 switch { case self < _iKiB: unit = _ib case self < _iMiB: unit = _iKiB case self < _iGiB: unit = _iMiB case self < _iTiB: unit = _iGiB case self <= math.MaxInt64: unit = _iTiB } io.WriteString(st, strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64)) if st.Flag(' ') { io.WriteString(st, " ") } io.WriteString(st, unit.String()) } const ( _b SizeB1000 = 1 _KB SizeB1000 = _b * 1000 _MB SizeB1000 = _KB * 1000 _GB SizeB1000 = _MB * 1000 _TB SizeB1000 = _GB * 1000 ) // SizeB1000 named type, which implements fmt.Formatter interface. It // adjusts its value according to byte size multiple by 1000 and appends // appropriate size marker (KB, MB, GB, TB). type SizeB1000 int64 func (self SizeB1000) Format(st fmt.State, verb rune) { var prec int switch verb { case 'd': case 's': prec = -1 default: if p, ok := st.Precision(); ok { prec = p } else { prec = 6 } } var unit SizeB1000 switch { case self < _KB: unit = _b case self < _MB: unit = _KB case self < _GB: unit = _MB case self < _TB: unit = _GB case self <= math.MaxInt64: unit = _TB } io.WriteString(st, strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64)) if st.Flag(' ') { io.WriteString(st, " ") } io.WriteString(st, unit.String()) } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/decor/spinner.go0000644000000000000000000000077715024302472025063 0ustar rootrootpackage decor var defaultSpinnerStyle = []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"} // Spinner returns spinner decorator. // // `frames` spinner frames, if nil or len==0, default is used // // `wcc` optional WC config func Spinner(frames []string, wcc ...WC) Decorator { if len(frames) == 0 { frames = defaultSpinnerStyle } var count uint f := func(s Statistics) string { frame := frames[count%uint(len(frames))] count++ return frame } return Any(f, wcc...) } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/decor/doc.go0000644000000000000000000000117415024302472024142 0ustar rootroot// Package decor provides common decorators for "github.com/vbauerster/mpb/v6" module. /* Some decorators returned by this package might have a closure state. It is ok to use decorators concurrently, unless you share the same decorator among multiple *mpb.Bar instances. To avoid data races, create new decorator per *mpb.Bar instance. Don't: p := mpb.New() name := decor.Name("bar") p.AddBar(100, mpb.AppendDecorators(name)) p.AddBar(100, mpb.AppendDecorators(name)) Do: p := mpb.New() p.AddBar(100, mpb.AppendDecorators(decor.Name("bar1"))) p.AddBar(100, mpb.AppendDecorators(decor.Name("bar2"))) */ package decor dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/decor/percentage.go0000644000000000000000000000213215024302472025505 0ustar rootrootpackage decor import ( "fmt" "io" "strconv" "github.com/vbauerster/mpb/v6/internal" ) type percentageType float64 func (s percentageType) Format(st fmt.State, verb rune) { var prec int switch verb { case 'd': case 's': prec = -1 default: if p, ok := st.Precision(); ok { prec = p } else { prec = 6 } } io.WriteString(st, strconv.FormatFloat(float64(s), 'f', prec, 64)) if st.Flag(' ') { io.WriteString(st, " ") } io.WriteString(st, "%") } // Percentage returns percentage decorator. It's a wrapper of NewPercentage. func Percentage(wcc ...WC) Decorator { return NewPercentage("% d", wcc...) 
} // NewPercentage percentage decorator with custom format string. // // format examples: // // format="%.1f" output: "1.0%" // format="% .1f" output: "1.0 %" // format="%d" output: "1%" // format="% d" output: "1 %" // func NewPercentage(format string, wcc ...WC) Decorator { if format == "" { format = "% d" } f := func(s Statistics) string { p := internal.Percentage(s.Total, s.Current, 100) return fmt.Sprintf(format, percentageType(p)) } return Any(f, wcc...) } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/decor/decorator.go0000644000000000000000000001166415024302472025364 0ustar rootrootpackage decor import ( "fmt" "time" "github.com/acarl005/stripansi" "github.com/mattn/go-runewidth" ) const ( // DidentRight bit specifies identation direction. // |foo |b | With DidentRight // | foo| b| Without DidentRight DidentRight = 1 << iota // DextraSpace bit adds extra space, makes sense with DSyncWidth only. // When DidentRight bit set, the space will be added to the right, // otherwise to the left. DextraSpace // DSyncWidth bit enables same column width synchronization. // Effective with multiple bars only. DSyncWidth // DSyncWidthR is shortcut for DSyncWidth|DidentRight DSyncWidthR = DSyncWidth | DidentRight // DSyncSpace is shortcut for DSyncWidth|DextraSpace DSyncSpace = DSyncWidth | DextraSpace // DSyncSpaceR is shortcut for DSyncWidth|DextraSpace|DidentRight DSyncSpaceR = DSyncWidth | DextraSpace | DidentRight ) // TimeStyle enum. type TimeStyle int // TimeStyle kinds. const ( ET_STYLE_GO TimeStyle = iota ET_STYLE_HHMMSS ET_STYLE_HHMM ET_STYLE_MMSS ) // Statistics consists of progress related statistics, that Decorator // may need. type Statistics struct { ID int AvailableWidth int Total int64 Current int64 Refill int64 Completed bool } // Decorator interface. // Most of the time there is no need to implement this interface // manually, as decor package already provides a wide range of decorators // which implement this interface. If however built-in decorators don't // meet your needs, you're free to implement your own one by implementing // this particular interface. The easy way to go is to convert a // `DecorFunc` into a `Decorator` interface by using provided // `func Any(DecorFunc, ...WC) Decorator`. type Decorator interface { Configurator Synchronizer Decor(Statistics) string } // DecorFunc func type. // To be used with `func Any`(DecorFunc, ...WC) Decorator`. type DecorFunc func(Statistics) string // Synchronizer interface. // All decorators implement this interface implicitly. Its Sync // method exposes width sync channel, if DSyncWidth bit is set. type Synchronizer interface { Sync() (chan int, bool) } // Configurator interface. type Configurator interface { GetConf() WC SetConf(WC) } // Wrapper interface. // If you're implementing custom Decorator by wrapping a built-in one, // it is necessary to implement this interface to retain functionality // of built-in Decorator. type Wrapper interface { Base() Decorator } // EwmaDecorator interface. // EWMA based decorators should implement this one. type EwmaDecorator interface { EwmaUpdate(int64, time.Duration) } // AverageDecorator interface. // Average decorators should implement this interface to provide start // time adjustment facility, for resume-able tasks. type AverageDecorator interface { AverageAdjust(time.Time) } // ShutdownListener interface. // If decorator needs to be notified once upon bar shutdown event, so // this is the right interface to implement. 
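// An illustrative (not upstream) minimal decorator that receives the event; the
// type name and the Shutdown body are arbitrary:
//
//	type finalFlush struct {
//		decor.WC
//	}
//
//	func (d *finalFlush) Decor(s decor.Statistics) string { return d.FormatMsg("") }
//	func (d *finalFlush) Shutdown()                        { /* release resources, print a summary, etc. */ }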
type ShutdownListener interface { Shutdown() } // Global convenience instances of WC with sync width bit set. // To be used with multiple bars only, i.e. not effective for single bar usage. var ( WCSyncWidth = WC{C: DSyncWidth} WCSyncWidthR = WC{C: DSyncWidthR} WCSyncSpace = WC{C: DSyncSpace} WCSyncSpaceR = WC{C: DSyncSpaceR} ) // WC is a struct with two public fields W and C, both of int type. // W represents width and C represents bit set of width related config. // A decorator should embed WC, to enable width synchronization. type WC struct { W int C int fill func(s string, w int) string wsync chan int } // FormatMsg formats final message according to WC.W and WC.C. // Should be called by any Decorator implementation. func (wc *WC) FormatMsg(msg string) string { pureWidth := runewidth.StringWidth(msg) stripWidth := runewidth.StringWidth(stripansi.Strip(msg)) maxCell := wc.W if (wc.C & DSyncWidth) != 0 { cellCount := stripWidth if (wc.C & DextraSpace) != 0 { cellCount++ } wc.wsync <- cellCount maxCell = <-wc.wsync } return wc.fill(msg, maxCell+(pureWidth-stripWidth)) } // Init initializes width related config. func (wc *WC) Init() WC { wc.fill = runewidth.FillLeft if (wc.C & DidentRight) != 0 { wc.fill = runewidth.FillRight } if (wc.C & DSyncWidth) != 0 { // it's deliberate choice to override wsync on each Init() call, // this way globals like WCSyncSpace can be reused wc.wsync = make(chan int) } return *wc } // Sync is implementation of Synchronizer interface. func (wc *WC) Sync() (chan int, bool) { if (wc.C&DSyncWidth) != 0 && wc.wsync == nil { panic(fmt.Sprintf("%T is not initialized", wc)) } return wc.wsync, (wc.C & DSyncWidth) != 0 } // GetConf is implementation of Configurator interface. func (wc *WC) GetConf() WC { return *wc } // SetConf is implementation of Configurator interface. func (wc *WC) SetConf(conf WC) { *wc = conf.Init() } func initWC(wcc ...WC) WC { var wc WC for _, nwc := range wcc { wc = nwc } return wc.Init() } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/decor/merge.go0000644000000000000000000000460315024302472024474 0ustar rootrootpackage decor import ( "strings" "github.com/acarl005/stripansi" "github.com/mattn/go-runewidth" ) // Merge wraps its decorator argument with intention to sync width // with several decorators of another bar. 
Visual example: // // +----+--------+---------+--------+ // | B1 | MERGE(D, P1, Pn) | // +----+--------+---------+--------+ // | B2 | D0 | D1 | Dn | // +----+--------+---------+--------+ // func Merge(decorator Decorator, placeholders ...WC) Decorator { if _, ok := decorator.Sync(); !ok || len(placeholders) == 0 { return decorator } md := &mergeDecorator{ Decorator: decorator, wc: decorator.GetConf(), placeHolders: make([]*placeHolderDecorator, len(placeholders)), } decorator.SetConf(WC{}) for i, wc := range placeholders { if (wc.C & DSyncWidth) == 0 { return decorator } md.placeHolders[i] = &placeHolderDecorator{wc.Init()} } return md } type mergeDecorator struct { Decorator wc WC placeHolders []*placeHolderDecorator } func (d *mergeDecorator) GetConf() WC { return d.wc } func (d *mergeDecorator) SetConf(conf WC) { d.wc = conf.Init() } func (d *mergeDecorator) MergeUnwrap() []Decorator { decorators := make([]Decorator, len(d.placeHolders)) for i, ph := range d.placeHolders { decorators[i] = ph } return decorators } func (d *mergeDecorator) Sync() (chan int, bool) { return d.wc.Sync() } func (d *mergeDecorator) Base() Decorator { return d.Decorator } func (d *mergeDecorator) Decor(s Statistics) string { msg := d.Decorator.Decor(s) pureWidth := runewidth.StringWidth(msg) stripWidth := runewidth.StringWidth(stripansi.Strip(msg)) cellCount := stripWidth if (d.wc.C & DextraSpace) != 0 { cellCount++ } total := runewidth.StringWidth(d.placeHolders[0].FormatMsg("")) pw := (cellCount - total) / len(d.placeHolders) rem := (cellCount - total) % len(d.placeHolders) var diff int for i := 1; i < len(d.placeHolders); i++ { ph := d.placeHolders[i] width := pw - diff if (ph.WC.C & DextraSpace) != 0 { width-- if width < 0 { width = 0 } } max := runewidth.StringWidth(ph.FormatMsg(strings.Repeat(" ", width))) total += max diff = max - pw } d.wc.wsync <- pw + rem max := <-d.wc.wsync return d.wc.fill(msg, max+total+(pureWidth-stripWidth)) } type placeHolderDecorator struct { WC } func (d *placeHolderDecorator) Decor(Statistics) string { return "" } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/decor/speed_test.go0000644000000000000000000001205315024302472025532 0ustar rootrootpackage decor import ( "testing" "time" ) func TestSpeedKiBDecor(t *testing.T) { cases := []struct { name string fmt string unit int current int64 elapsed time.Duration expected string }{ { name: "empty fmt", unit: UnitKiB, fmt: "", current: 0, elapsed: time.Second, expected: "0b/s", }, { name: "UnitKiB:%d:0b", unit: UnitKiB, fmt: "%d", current: 0, elapsed: time.Second, expected: "0b/s", }, { name: "UnitKiB:% .2f:0b", unit: UnitKiB, fmt: "% .2f", current: 0, elapsed: time.Second, expected: "0.00 b/s", }, { name: "UnitKiB:%d:1b", unit: UnitKiB, fmt: "%d", current: 1, elapsed: time.Second, expected: "1b/s", }, { name: "UnitKiB:% .2f:1b", unit: UnitKiB, fmt: "% .2f", current: 1, elapsed: time.Second, expected: "1.00 b/s", }, { name: "UnitKiB:%d:KiB", unit: UnitKiB, fmt: "%d", current: 2 * int64(_iKiB), elapsed: 1 * time.Second, expected: "2KiB/s", }, { name: "UnitKiB:% .f:KiB", unit: UnitKiB, fmt: "% .2f", current: 2 * int64(_iKiB), elapsed: 1 * time.Second, expected: "2.00 KiB/s", }, { name: "UnitKiB:%d:MiB", unit: UnitKiB, fmt: "%d", current: 2 * int64(_iMiB), elapsed: 1 * time.Second, expected: "2MiB/s", }, { name: "UnitKiB:% .2f:MiB", unit: UnitKiB, fmt: "% .2f", current: 2 * int64(_iMiB), elapsed: 1 * time.Second, expected: "2.00 MiB/s", }, { name: "UnitKiB:%d:GiB", unit: UnitKiB, fmt: "%d", current: 2 * int64(_iGiB), 
elapsed: 1 * time.Second, expected: "2GiB/s", }, { name: "UnitKiB:% .2f:GiB", unit: UnitKiB, fmt: "% .2f", current: 2 * int64(_iGiB), elapsed: 1 * time.Second, expected: "2.00 GiB/s", }, { name: "UnitKiB:%d:TiB", unit: UnitKiB, fmt: "%d", current: 2 * int64(_iTiB), elapsed: 1 * time.Second, expected: "2TiB/s", }, { name: "UnitKiB:% .2f:TiB", unit: UnitKiB, fmt: "% .2f", current: 2 * int64(_iTiB), elapsed: 1 * time.Second, expected: "2.00 TiB/s", }, } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { decor := NewAverageSpeed(tc.unit, tc.fmt, time.Now().Add(-tc.elapsed)) stat := Statistics{ Current: tc.current, } res := decor.Decor(stat) if res != tc.expected { t.Fatalf("expected: %q, got: %q\n", tc.expected, res) } }) } } func TestSpeedKBDecor(t *testing.T) { cases := []struct { name string fmt string unit int current int64 elapsed time.Duration expected string }{ { name: "empty fmt", unit: UnitKB, fmt: "", current: 0, elapsed: time.Second, expected: "0b/s", }, { name: "UnitKB:%d:0b", unit: UnitKB, fmt: "%d", current: 0, elapsed: time.Second, expected: "0b/s", }, { name: "UnitKB:% .2f:0b", unit: UnitKB, fmt: "% .2f", current: 0, elapsed: time.Second, expected: "0.00 b/s", }, { name: "UnitKB:%d:1b", unit: UnitKB, fmt: "%d", current: 1, elapsed: time.Second, expected: "1b/s", }, { name: "UnitKB:% .2f:1b", unit: UnitKB, fmt: "% .2f", current: 1, elapsed: time.Second, expected: "1.00 b/s", }, { name: "UnitKB:%d:KB", unit: UnitKB, fmt: "%d", current: 2 * int64(_KB), elapsed: 1 * time.Second, expected: "2KB/s", }, { name: "UnitKB:% .f:KB", unit: UnitKB, fmt: "% .2f", current: 2 * int64(_KB), elapsed: 1 * time.Second, expected: "2.00 KB/s", }, { name: "UnitKB:%d:MB", unit: UnitKB, fmt: "%d", current: 2 * int64(_MB), elapsed: 1 * time.Second, expected: "2MB/s", }, { name: "UnitKB:% .2f:MB", unit: UnitKB, fmt: "% .2f", current: 2 * int64(_MB), elapsed: 1 * time.Second, expected: "2.00 MB/s", }, { name: "UnitKB:%d:GB", unit: UnitKB, fmt: "%d", current: 2 * int64(_GB), elapsed: 1 * time.Second, expected: "2GB/s", }, { name: "UnitKB:% .2f:GB", unit: UnitKB, fmt: "% .2f", current: 2 * int64(_GB), elapsed: 1 * time.Second, expected: "2.00 GB/s", }, { name: "UnitKB:%d:TB", unit: UnitKB, fmt: "%d", current: 2 * int64(_TB), elapsed: 1 * time.Second, expected: "2TB/s", }, { name: "UnitKB:% .2f:TB", unit: UnitKB, fmt: "% .2f", current: 2 * int64(_TB), elapsed: 1 * time.Second, expected: "2.00 TB/s", }, } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { decor := NewAverageSpeed(tc.unit, tc.fmt, time.Now().Add(-tc.elapsed)) stat := Statistics{ Current: tc.current, } res := decor.Decor(stat) if res != tc.expected { t.Fatalf("expected: %q, got: %q\n", tc.expected, res) } }) } } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/decor/eta.go0000644000000000000000000001347315024302472024153 0ustar rootrootpackage decor import ( "fmt" "math" "time" "github.com/VividCortex/ewma" ) // TimeNormalizer interface. Implementors could be passed into // MovingAverageETA, in order to affect i.e. normalize its output. type TimeNormalizer interface { Normalize(time.Duration) time.Duration } // TimeNormalizerFunc is function type adapter to convert function // into TimeNormalizer. type TimeNormalizerFunc func(time.Duration) time.Duration func (f TimeNormalizerFunc) Normalize(src time.Duration) time.Duration { return f(src) } // EwmaETA exponential-weighted-moving-average based ETA decorator. 
// For this decorator to work correctly you have to measure each // iteration's duration and pass it to the // *Bar.DecoratorEwmaUpdate(time.Duration) method after each increment. func EwmaETA(style TimeStyle, age float64, wcc ...WC) Decorator { var average ewma.MovingAverage if age == 0 { average = ewma.NewMovingAverage() } else { average = ewma.NewMovingAverage(age) } return MovingAverageETA(style, NewThreadSafeMovingAverage(average), nil, wcc...) } // MovingAverageETA decorator relies on MovingAverage implementation to calculate its average. // // `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS] // // `average` implementation of MovingAverage interface // // `normalizer` available implementations are [FixedIntervalTimeNormalizer|MaxTolerateTimeNormalizer] // // `wcc` optional WC config // func MovingAverageETA(style TimeStyle, average ewma.MovingAverage, normalizer TimeNormalizer, wcc ...WC) Decorator { d := &movingAverageETA{ WC: initWC(wcc...), average: average, normalizer: normalizer, producer: chooseTimeProducer(style), } return d } type movingAverageETA struct { WC average ewma.MovingAverage normalizer TimeNormalizer producer func(time.Duration) string } func (d *movingAverageETA) Decor(s Statistics) string { v := math.Round(d.average.Value()) remaining := time.Duration((s.Total - s.Current) * int64(v)) if d.normalizer != nil { remaining = d.normalizer.Normalize(remaining) } return d.FormatMsg(d.producer(remaining)) } func (d *movingAverageETA) EwmaUpdate(n int64, dur time.Duration) { durPerItem := float64(dur) / float64(n) if math.IsInf(durPerItem, 0) || math.IsNaN(durPerItem) { return } d.average.Add(durPerItem) } // AverageETA decorator. It's wrapper of NewAverageETA. // // `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS] // // `wcc` optional WC config // func AverageETA(style TimeStyle, wcc ...WC) Decorator { return NewAverageETA(style, time.Now(), nil, wcc...) } // NewAverageETA decorator with user provided start time. // // `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS] // // `startTime` start time // // `normalizer` available implementations are [FixedIntervalTimeNormalizer|MaxTolerateTimeNormalizer] // // `wcc` optional WC config // func NewAverageETA(style TimeStyle, startTime time.Time, normalizer TimeNormalizer, wcc ...WC) Decorator { d := &averageETA{ WC: initWC(wcc...), startTime: startTime, normalizer: normalizer, producer: chooseTimeProducer(style), } return d } type averageETA struct { WC startTime time.Time normalizer TimeNormalizer producer func(time.Duration) string } func (d *averageETA) Decor(s Statistics) string { var remaining time.Duration if s.Current != 0 { durPerItem := float64(time.Since(d.startTime)) / float64(s.Current) durPerItem = math.Round(durPerItem) remaining = time.Duration((s.Total - s.Current) * int64(durPerItem)) if d.normalizer != nil { remaining = d.normalizer.Normalize(remaining) } } return d.FormatMsg(d.producer(remaining)) } func (d *averageETA) AverageAdjust(startTime time.Time) { d.startTime = startTime } // MaxTolerateTimeNormalizer returns implementation of TimeNormalizer. 
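// An illustrative (not upstream) pairing with MovingAverageETA from this file;
// the fifteen second tolerance and the median moving average are arbitrary choices:
//
//	eta := decor.MovingAverageETA(
//		decor.ET_STYLE_GO,
//		decor.NewMedian(),
//		decor.MaxTolerateTimeNormalizer(15*time.Second),
//	)
//	bar := p.AddBar(total, mpb.AppendDecorators(eta))
//	// bar.DecoratorEwmaUpdate(dur) must still be called after each increment,
//	// otherwise the moving average never receives samples.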
func MaxTolerateTimeNormalizer(maxTolerate time.Duration) TimeNormalizer { var normalized time.Duration var lastCall time.Time return TimeNormalizerFunc(func(remaining time.Duration) time.Duration { if diff := normalized - remaining; diff <= 0 || diff > maxTolerate || remaining < time.Minute { normalized = remaining lastCall = time.Now() return remaining } normalized -= time.Since(lastCall) lastCall = time.Now() return normalized }) } // FixedIntervalTimeNormalizer returns implementation of TimeNormalizer. func FixedIntervalTimeNormalizer(updInterval int) TimeNormalizer { var normalized time.Duration var lastCall time.Time var count int return TimeNormalizerFunc(func(remaining time.Duration) time.Duration { if count == 0 || remaining < time.Minute { count = updInterval normalized = remaining lastCall = time.Now() return remaining } count-- normalized -= time.Since(lastCall) lastCall = time.Now() return normalized }) } func chooseTimeProducer(style TimeStyle) func(time.Duration) string { switch style { case ET_STYLE_HHMMSS: return func(remaining time.Duration) string { hours := int64(remaining/time.Hour) % 60 minutes := int64(remaining/time.Minute) % 60 seconds := int64(remaining/time.Second) % 60 return fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds) } case ET_STYLE_HHMM: return func(remaining time.Duration) string { hours := int64(remaining/time.Hour) % 60 minutes := int64(remaining/time.Minute) % 60 return fmt.Sprintf("%02d:%02d", hours, minutes) } case ET_STYLE_MMSS: return func(remaining time.Duration) string { hours := int64(remaining/time.Hour) % 60 minutes := int64(remaining/time.Minute) % 60 seconds := int64(remaining/time.Second) % 60 if hours > 0 { return fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds) } return fmt.Sprintf("%02d:%02d", minutes, seconds) } default: return func(remaining time.Duration) string { // strip off nanoseconds return ((remaining / time.Second) * time.Second).String() } } } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/decor/elapsed.go0000644000000000000000000000137215024302472025012 0ustar rootrootpackage decor import ( "time" ) // Elapsed decorator. It's wrapper of NewElapsed. // // `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS] // // `wcc` optional WC config // func Elapsed(style TimeStyle, wcc ...WC) Decorator { return NewElapsed(style, time.Now(), wcc...) } // NewElapsed returns elapsed time decorator. // // `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS] // // `startTime` start time // // `wcc` optional WC config // func NewElapsed(style TimeStyle, startTime time.Time, wcc ...WC) Decorator { var msg string producer := chooseTimeProducer(style) fn := func(s Statistics) string { if !s.Completed { msg = producer(time.Since(startTime)) } return msg } return Any(fn, wcc...) } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/decor/sizeb1000_string.go0000644000000000000000000000157215024302472026402 0ustar rootroot// Code generated by "stringer -type=SizeB1000 -trimprefix=_"; DO NOT EDIT. package decor import "strconv" func _() { // An "invalid array index" compiler error signifies that the constant values have changed. // Re-run the stringer command to generate them again. 
var x [1]struct{} _ = x[_b-1] _ = x[_KB-1000] _ = x[_MB-1000000] _ = x[_GB-1000000000] _ = x[_TB-1000000000000] } const ( _SizeB1000_name_0 = "b" _SizeB1000_name_1 = "KB" _SizeB1000_name_2 = "MB" _SizeB1000_name_3 = "GB" _SizeB1000_name_4 = "TB" ) func (i SizeB1000) String() string { switch { case i == 1: return _SizeB1000_name_0 case i == 1000: return _SizeB1000_name_1 case i == 1000000: return _SizeB1000_name_2 case i == 1000000000: return _SizeB1000_name_3 case i == 1000000000000: return _SizeB1000_name_4 default: return "SizeB1000(" + strconv.FormatInt(int64(i), 10) + ")" } } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/decor/on_complete.go0000644000000000000000000000142415024302472025677 0ustar rootrootpackage decor // OnComplete returns decorator, which wraps provided decorator, with // sole purpose to display provided message on complete event. // // `decorator` Decorator to wrap // // `message` message to display on complete event // func OnComplete(decorator Decorator, message string) Decorator { d := &onCompleteWrapper{ Decorator: decorator, msg: message, } if md, ok := decorator.(*mergeDecorator); ok { d.Decorator, md.Decorator = md.Decorator, d return md } return d } type onCompleteWrapper struct { Decorator msg string } func (d *onCompleteWrapper) Decor(s Statistics) string { if s.Completed { wc := d.GetConf() return wc.FormatMsg(d.msg) } return d.Decorator.Decor(s) } func (d *onCompleteWrapper) Base() Decorator { return d.Decorator } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/decor/counters.go0000644000000000000000000001476515024302472025251 0ustar rootrootpackage decor import ( "fmt" "strings" ) const ( _ = iota UnitKiB UnitKB ) // CountersNoUnit is a wrapper around Counters with no unit param. func CountersNoUnit(pairFmt string, wcc ...WC) Decorator { return Counters(0, pairFmt, wcc...) } // CountersKibiByte is a wrapper around Counters with predefined unit // UnitKiB (bytes/1024). func CountersKibiByte(pairFmt string, wcc ...WC) Decorator { return Counters(UnitKiB, pairFmt, wcc...) } // CountersKiloByte is a wrapper around Counters with predefined unit // UnitKB (bytes/1000). func CountersKiloByte(pairFmt string, wcc ...WC) Decorator { return Counters(UnitKB, pairFmt, wcc...) } // Counters decorator with dynamic unit measure adjustment. // // `unit` one of [0|UnitKiB|UnitKB] zero for no unit // // `pairFmt` printf compatible verbs for current and total pair // // `wcc` optional WC config // // pairFmt example if unit=UnitKB: // // pairFmt="%.1f / %.1f" output: "1.0MB / 12.0MB" // pairFmt="% .1f / % .1f" output: "1.0 MB / 12.0 MB" // pairFmt="%d / %d" output: "1MB / 12MB" // pairFmt="% d / % d" output: "1 MB / 12 MB" // func Counters(unit int, pairFmt string, wcc ...WC) Decorator { producer := func(unit int, pairFmt string) DecorFunc { if pairFmt == "" { pairFmt = "%d / %d" } else if strings.Count(pairFmt, "%") != 2 { panic("expected pairFmt with exactly 2 verbs") } switch unit { case UnitKiB: return func(s Statistics) string { return fmt.Sprintf(pairFmt, SizeB1024(s.Current), SizeB1024(s.Total)) } case UnitKB: return func(s Statistics) string { return fmt.Sprintf(pairFmt, SizeB1000(s.Current), SizeB1000(s.Total)) } default: return func(s Statistics) string { return fmt.Sprintf(pairFmt, s.Current, s.Total) } } } return Any(producer(unit, pairFmt), wcc...) } // TotalNoUnit is a wrapper around Total with no unit param. func TotalNoUnit(format string, wcc ...WC) Decorator { return Total(0, format, wcc...) 
} // TotalKibiByte is a wrapper around Total with predefined unit // UnitKiB (bytes/1024). func TotalKibiByte(format string, wcc ...WC) Decorator { return Total(UnitKiB, format, wcc...) } // TotalKiloByte is a wrapper around Total with predefined unit // UnitKB (bytes/1000). func TotalKiloByte(format string, wcc ...WC) Decorator { return Total(UnitKB, format, wcc...) } // Total decorator with dynamic unit measure adjustment. // // `unit` one of [0|UnitKiB|UnitKB] zero for no unit // // `format` printf compatible verb for Total // // `wcc` optional WC config // // format example if unit=UnitKiB: // // format="%.1f" output: "12.0MiB" // format="% .1f" output: "12.0 MiB" // format="%d" output: "12MiB" // format="% d" output: "12 MiB" // func Total(unit int, format string, wcc ...WC) Decorator { producer := func(unit int, format string) DecorFunc { if format == "" { format = "%d" } else if strings.Count(format, "%") != 1 { panic("expected format with exactly 1 verb") } switch unit { case UnitKiB: return func(s Statistics) string { return fmt.Sprintf(format, SizeB1024(s.Total)) } case UnitKB: return func(s Statistics) string { return fmt.Sprintf(format, SizeB1000(s.Total)) } default: return func(s Statistics) string { return fmt.Sprintf(format, s.Total) } } } return Any(producer(unit, format), wcc...) } // CurrentNoUnit is a wrapper around Current with no unit param. func CurrentNoUnit(format string, wcc ...WC) Decorator { return Current(0, format, wcc...) } // CurrentKibiByte is a wrapper around Current with predefined unit // UnitKiB (bytes/1024). func CurrentKibiByte(format string, wcc ...WC) Decorator { return Current(UnitKiB, format, wcc...) } // CurrentKiloByte is a wrapper around Current with predefined unit // UnitKB (bytes/1000). func CurrentKiloByte(format string, wcc ...WC) Decorator { return Current(UnitKB, format, wcc...) } // Current decorator with dynamic unit measure adjustment. // // `unit` one of [0|UnitKiB|UnitKB] zero for no unit // // `format` printf compatible verb for Current // // `wcc` optional WC config // // format example if unit=UnitKiB: // // format="%.1f" output: "12.0MiB" // format="% .1f" output: "12.0 MiB" // format="%d" output: "12MiB" // format="% d" output: "12 MiB" // func Current(unit int, format string, wcc ...WC) Decorator { producer := func(unit int, format string) DecorFunc { if format == "" { format = "%d" } else if strings.Count(format, "%") != 1 { panic("expected format with exactly 1 verb") } switch unit { case UnitKiB: return func(s Statistics) string { return fmt.Sprintf(format, SizeB1024(s.Current)) } case UnitKB: return func(s Statistics) string { return fmt.Sprintf(format, SizeB1000(s.Current)) } default: return func(s Statistics) string { return fmt.Sprintf(format, s.Current) } } } return Any(producer(unit, format), wcc...) } // InvertedCurrentNoUnit is a wrapper around InvertedCurrent with no unit param. func InvertedCurrentNoUnit(format string, wcc ...WC) Decorator { return InvertedCurrent(0, format, wcc...) } // InvertedCurrentKibiByte is a wrapper around InvertedCurrent with predefined unit // UnitKiB (bytes/1024). func InvertedCurrentKibiByte(format string, wcc ...WC) Decorator { return InvertedCurrent(UnitKiB, format, wcc...) } // InvertedCurrentKiloByte is a wrapper around InvertedCurrent with predefined unit // UnitKB (bytes/1000). func InvertedCurrentKiloByte(format string, wcc ...WC) Decorator { return InvertedCurrent(UnitKB, format, wcc...) } // InvertedCurrent decorator with dynamic unit measure adjustment. 
// // `unit` one of [0|UnitKiB|UnitKB] zero for no unit // // `format` printf compatible verb for InvertedCurrent // // `wcc` optional WC config // // format example if unit=UnitKiB: // // format="%.1f" output: "12.0MiB" // format="% .1f" output: "12.0 MiB" // format="%d" output: "12MiB" // format="% d" output: "12 MiB" // func InvertedCurrent(unit int, format string, wcc ...WC) Decorator { producer := func(unit int, format string) DecorFunc { if format == "" { format = "%d" } else if strings.Count(format, "%") != 1 { panic("expected format with exactly 1 verb") } switch unit { case UnitKiB: return func(s Statistics) string { return fmt.Sprintf(format, SizeB1024(s.Total-s.Current)) } case UnitKB: return func(s Statistics) string { return fmt.Sprintf(format, SizeB1000(s.Total-s.Current)) } default: return func(s Statistics) string { return fmt.Sprintf(format, s.Total-s.Current) } } } return Any(producer(unit, format), wcc...) } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/decor/any.go0000644000000000000000000000057515024302472024170 0ustar rootrootpackage decor // Any decorator displays text, that can be changed during decorator's // lifetime via provided DecorFunc. // // `fn` DecorFunc callback // // `wcc` optional WC config // func Any(fn DecorFunc, wcc ...WC) Decorator { return &any{initWC(wcc...), fn} } type any struct { WC fn DecorFunc } func (d *any) Decor(s Statistics) string { return d.FormatMsg(d.fn(s)) } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/barbench_test.go0000644000000000000000000000174715024302472025112 0ustar rootrootpackage mpb import ( "io/ioutil" "testing" "github.com/vbauerster/mpb/v6/decor" ) func BenchmarkIncrSingleBar(b *testing.B) { p := New(WithOutput(ioutil.Discard), WithWidth(80)) bar := p.AddBar(int64(b.N)) for i := 0; i < b.N; i++ { bar.Increment() } } func BenchmarkIncrSingleBarWhileIsNotCompleted(b *testing.B) { p := New(WithOutput(ioutil.Discard), WithWidth(80)) bar := p.AddBar(int64(b.N)) for !bar.Completed() { bar.Increment() } } func BenchmarkIncrSingleBarWithNameDecorator(b *testing.B) { p := New(WithOutput(ioutil.Discard), WithWidth(80)) bar := p.AddBar(int64(b.N), PrependDecorators(decor.Name("test"))) for i := 0; i < b.N; i++ { bar.Increment() } } func BenchmarkIncrSingleBarWithNameAndEwmaETADecorator(b *testing.B) { p := New(WithOutput(ioutil.Discard), WithWidth(80)) bar := p.AddBar(int64(b.N), PrependDecorators(decor.Name("test")), AppendDecorators(decor.EwmaETA(decor.ET_STYLE_GO, 60)), ) for i := 0; i < b.N; i++ { bar.Increment() } } dependencies/pkg/mod/github.com/vbauerster/mpb/v6@v6.0.4/bar_option.go0000644000000000000000000000755615024302472024447 0ustar rootrootpackage mpb import ( "bytes" "io" "github.com/vbauerster/mpb/v6/decor" "github.com/vbauerster/mpb/v6/internal" ) // BarOption is a func option to alter default behavior of a bar. type BarOption func(*bState) func (s *bState) addDecorators(dest *[]decor.Decorator, decorators ...decor.Decorator) { type mergeWrapper interface { MergeUnwrap() []decor.Decorator } for _, decorator := range decorators { if mw, ok := decorator.(mergeWrapper); ok { *dest = append(*dest, mw.MergeUnwrap()...) } *dest = append(*dest, decorator) } } // AppendDecorators let you inject decorators to the bar's right side. func AppendDecorators(decorators ...decor.Decorator) BarOption { return func(s *bState) { s.addDecorators(&s.aDecorators, decorators...) } } // PrependDecorators let you inject decorators to the bar's left side. 
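//
// A hedged usage sketch, built only from calls that also appear in
// barbench_test.go and in this file (mpb.New, WithWidth, p.AddBar, decor.Name,
// decor.CountersKibiByte); total is an assumed variable used for illustration:
//
//	p := mpb.New(mpb.WithWidth(80))
//	bar := p.AddBar(int64(total),
//		mpb.PrependDecorators(decor.Name("download ")),
//		mpb.AppendDecorators(decor.CountersKibiByte("% .1f / % .1f")),
//	)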
func PrependDecorators(decorators ...decor.Decorator) BarOption { return func(s *bState) { s.addDecorators(&s.pDecorators, decorators...) } } // BarID sets bar id. func BarID(id int) BarOption { return func(s *bState) { s.id = id } } // BarWidth sets bar width independent of the container. func BarWidth(width int) BarOption { return func(s *bState) { s.reqWidth = width } } // BarQueueAfter queues this (being constructed) bar to relplace // runningBar after it has been completed. func BarQueueAfter(runningBar *Bar) BarOption { if runningBar == nil { return nil } return func(s *bState) { s.runningBar = runningBar } } // BarRemoveOnComplete removes both bar's filler and its decorators // on complete event. func BarRemoveOnComplete() BarOption { return func(s *bState) { s.dropOnComplete = true } } // BarFillerClearOnComplete clears bar's filler on complete event. // It's shortcut for BarFillerOnComplete(""). func BarFillerClearOnComplete() BarOption { return BarFillerOnComplete("") } // BarFillerOnComplete replaces bar's filler with message, on complete event. func BarFillerOnComplete(message string) BarOption { return BarFillerMiddleware(func(base BarFiller) BarFiller { return BarFillerFunc(func(w io.Writer, reqWidth int, st decor.Statistics) { if st.Completed { io.WriteString(w, message) } else { base.Fill(w, reqWidth, st) } }) }) } // BarFillerMiddleware provides a way to augment the underlying BarFiller. func BarFillerMiddleware(middle func(BarFiller) BarFiller) BarOption { return func(s *bState) { s.middleware = middle } } // BarPriority sets bar's priority. Zero is highest priority, i.e. bar // will be on top. If `BarReplaceOnComplete` option is supplied, this // option is ignored. func BarPriority(priority int) BarOption { return func(s *bState) { s.priority = priority } } // BarExtender provides a way to extend bar to the next new line. func BarExtender(filler BarFiller) BarOption { if filler == nil { return nil } return func(s *bState) { s.extender = makeExtenderFunc(filler) } } func makeExtenderFunc(filler BarFiller) extenderFunc { buf := new(bytes.Buffer) return func(r io.Reader, reqWidth int, st decor.Statistics) (io.Reader, int) { filler.Fill(buf, reqWidth, st) return io.MultiReader(r, buf), bytes.Count(buf.Bytes(), []byte("\n")) } } // BarFillerTrim removes leading and trailing space around the underlying BarFiller. func BarFillerTrim() BarOption { return func(s *bState) { s.trimSpace = true } } // BarNoPop disables bar pop out of container. Effective when // PopCompletedMode of container is enabled. func BarNoPop() BarOption { return func(s *bState) { s.noPop = true } } // BarOptional will invoke provided option only when pick is true. func BarOptional(option BarOption, pick bool) BarOption { return BarOptOn(option, internal.Predicate(pick)) } // BarOptOn will invoke provided option only when higher order predicate // evaluates to true. 
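//
// A small sketch of the intended use; p, n and verbose are assumptions made
// purely for illustration:
//
//	bar := p.AddBar(n, BarOptOn(BarRemoveOnComplete(), func() bool { return !verbose }))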
func BarOptOn(option BarOption, predicate func() bool) BarOption { if predicate() { return option } return nil } dependencies/pkg/mod/github.com/icinga/0000775000000000000000000000000015024302466017037 5ustar rootrootdependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/0000755000000000000000000000000015024302466023207 5ustar rootrootdependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/structify/0000755000000000000000000000000015024302466025243 5ustar rootrootdependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/structify/structify.go0000644000000000000000000001067315024302466027635 0ustar rootrootpackage structify import ( "encoding" "fmt" "github.com/pkg/errors" "golang.org/x/exp/constraints" "reflect" "strconv" "strings" "unsafe" ) // structBranch represents either a leaf or a subTree. type structBranch struct { // field specifies the struct field index. field int // leaf specifies the map key to parse the struct field from. leaf string // subTree specifies the struct field's inner tree. subTree []structBranch } type MapStructifier = func(map[string]interface{}) (interface{}, error) // MakeMapStructifier builds a function which parses a map's string values into a new struct of type t // and returns a pointer to it. tag specifies which tag connects struct fields to map keys. // MakeMapStructifier panics if it detects an unsupported type (suitable for usage in init() or global vars). func MakeMapStructifier(t reflect.Type, tag string, initer func(any)) MapStructifier { tree := buildStructTree(t, tag) return func(kv map[string]interface{}) (interface{}, error) { vPtr := reflect.New(t) ptr := vPtr.Interface() if initer != nil { initer(ptr) } vPtrElem := vPtr.Elem() err := errors.Wrapf(structifyMapByTree(kv, tree, vPtrElem, vPtrElem, new([]int)), "can't structify map %#v by tree %#v", kv, tree) return ptr, err } } // buildStructTree assembles a tree which represents the struct t based on tag. func buildStructTree(t reflect.Type, tag string) []structBranch { var tree []structBranch numFields := t.NumField() for i := 0; i < numFields; i++ { if field := t.Field(i); field.PkgPath == "" { switch tagValue := field.Tag.Get(tag); tagValue { case "", "-": case ",inline": if subTree := buildStructTree(field.Type, tag); subTree != nil { tree = append(tree, structBranch{i, "", subTree}) } default: // If parseString doesn't support *T, it'll panic. _ = parseString("", reflect.New(field.Type).Interface()) tree = append(tree, structBranch{i, tagValue, nil}) } } } return tree } // structifyMapByTree parses src's string values into the struct dest according to tree's specification. func structifyMapByTree(src map[string]interface{}, tree []structBranch, dest, root reflect.Value, stack *[]int) error { *stack = append(*stack, 0) defer func() { *stack = (*stack)[:len(*stack)-1] }() for _, branch := range tree { (*stack)[len(*stack)-1] = branch.field if branch.subTree == nil { if v, ok := src[branch.leaf]; ok { if vs, ok := v.(string); ok { if err := parseString(vs, dest.Field(branch.field).Addr().Interface()); err != nil { rt := root.Type() typ := rt var path []string for _, i := range *stack { f := typ.Field(i) path = append(path, f.Name) typ = f.Type } return errors.Wrapf(err, "can't parse %s into the %s %s#%s: %s", branch.leaf, typ.Name(), rt.Name(), strings.Join(path, "."), vs) } } } } else if err := structifyMapByTree(src, branch.subTree, dest.Field(branch.field), root, stack); err != nil { return err } } return nil } // parseString parses src into *dest. 
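// For illustration, the numeric branches behave like this (hedged example values):
//
//	var port uint16
//	_ = parseString("8080", &port) // port == 8080
//
//	var ratio float64
//	_ = parseString("0.75", &ratio) // ratio == 0.75
//
// An unsupported *T panics, which buildStructTree and MakeMapStructifier rely
// on to reject unsupported struct fields up front.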
func parseString(src string, dest interface{}) error { switch ptr := dest.(type) { case encoding.TextUnmarshaler: return ptr.UnmarshalText([]byte(src)) case *string: *ptr = src return nil case **string: *ptr = &src return nil case *uint8: return parseUint(src, ptr) case *uint16: return parseUint(src, ptr) case *uint32: return parseUint(src, ptr) case *uint64: return parseUint(src, ptr) case *int8: return parseInt(src, ptr) case *int16: return parseInt(src, ptr) case *int32: return parseInt(src, ptr) case *int64: return parseInt(src, ptr) case *float32: return parseFloat(src, ptr) case *float64: return parseFloat(src, ptr) default: panic(fmt.Sprintf("unsupported type: %T", dest)) } } // parseUint parses src into *dest. func parseUint[T constraints.Unsigned](src string, dest *T) error { i, err := strconv.ParseUint(src, 10, bitSizeOf[T]()) if err == nil { *dest = T(i) } return err } // parseInt parses src into *dest. func parseInt[T constraints.Signed](src string, dest *T) error { i, err := strconv.ParseInt(src, 10, bitSizeOf[T]()) if err == nil { *dest = T(i) } return err } // parseFloat parses src into *dest. func parseFloat[T constraints.Float](src string, dest *T) error { f, err := strconv.ParseFloat(src, bitSizeOf[T]()) if err == nil { *dest = T(f) } return err } func bitSizeOf[T any]() int { var x T return int(unsafe.Sizeof(x) * 8) } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/LICENSE0000644000000000000000000000205415024302466024215 0ustar rootrootMIT License Copyright (c) 2024 Icinga GmbH Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/objectpacker/0000755000000000000000000000000015024302466025643 5ustar rootrootdependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/objectpacker/objectpacker_test.go0000644000000000000000000001220115024302466031661 0ustar rootrootpackage objectpacker import ( "bytes" "github.com/icinga/icinga-go-library/types" "github.com/pkg/errors" "io" "testing" ) // limitedWriter allows writing a specific amount of data. type limitedWriter struct { // limit specifies how many bytes to allow to write. limit int } var _ io.Writer = (*limitedWriter)(nil) // Write returns io.EOF once lw.limit is exceeded, nil otherwise. 
func (lw *limitedWriter) Write(p []byte) (n int, err error) { if len(p) <= lw.limit { lw.limit -= len(p) return len(p), nil } n = lw.limit err = io.EOF lw.limit = 0 return } func TestLimitedWriter_Write(t *testing.T) { assertLimitedWriter_Write(t, 3, []byte{1, 2}, 2, nil, 1) assertLimitedWriter_Write(t, 3, []byte{1, 2, 3}, 3, nil, 0) assertLimitedWriter_Write(t, 3, []byte{1, 2, 3, 4}, 3, io.EOF, 0) assertLimitedWriter_Write(t, 0, []byte{1}, 0, io.EOF, 0) assertLimitedWriter_Write(t, 0, nil, 0, nil, 0) } func assertLimitedWriter_Write(t *testing.T, limitBefore int, p []byte, n int, err error, limitAfter int) { t.Helper() lw := limitedWriter{limitBefore} actualN, actualErr := lw.Write(p) if !errors.Is(actualErr, err) { t.Errorf("_, err := (&limitedWriter{%d}).Write(%#v); err != %#v", limitBefore, p, err) } if actualN != n { t.Errorf("n, _ := (&limitedWriter{%d}).Write(%#v); n != %d", limitBefore, p, n) } if lw.limit != limitAfter { t.Errorf("lw := limitedWriter{%d}; lw.Write(%#v); lw.limit != %d", limitBefore, p, limitAfter) } } func TestPackAny(t *testing.T) { assertPackAny(t, nil, []byte{0}) assertPackAny(t, false, []byte{1}) assertPackAny(t, true, []byte{2}) assertPackAnyPanic(t, -42, 0) assertPackAnyPanic(t, int8(-42), 0) assertPackAnyPanic(t, int16(-42), 0) assertPackAnyPanic(t, int32(-42), 0) assertPackAnyPanic(t, int64(-42), 0) assertPackAnyPanic(t, uint(42), 0) assertPackAnyPanic(t, uint8(42), 0) assertPackAnyPanic(t, uint16(42), 0) assertPackAnyPanic(t, uint32(42), 0) assertPackAnyPanic(t, uint64(42), 0) assertPackAnyPanic(t, uintptr(42), 0) assertPackAnyPanic(t, float32(-42.5), 0) assertPackAny(t, -42.5, []byte{3, 0xc0, 0x45, 0x40, 0, 0, 0, 0, 0}) assertPackAnyPanic(t, []struct{}(nil), 9) assertPackAnyPanic(t, []struct{}{}, 9) assertPackAny(t, []interface{}{nil, true, -42.5}, []byte{ 5, 0, 0, 0, 0, 0, 0, 0, 3, 0, 2, 3, 0xc0, 0x45, 0x40, 0, 0, 0, 0, 0, }) assertPackAny(t, []string{"", "a"}, []byte{ 5, 0, 0, 0, 0, 0, 0, 0, 2, 4, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 1, 'a', }) assertPackAnyPanic(t, []interface{}{0 + 0i}, 9) assertPackAnyPanic(t, map[struct{}]struct{}(nil), 9) assertPackAnyPanic(t, map[struct{}]struct{}{}, 9) assertPackAny(t, map[interface{}]interface{}{true: "", "nil": -42.5}, []byte{ 6, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 'n', 'i', 'l', 3, 0xc0, 0x45, 0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 't', 'r', 'u', 'e', 4, 0, 0, 0, 0, 0, 0, 0, 0, }) assertPackAny(t, map[string]float64{"": 42}, []byte{ 6, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0x40, 0x45, 0, 0, 0, 0, 0, 0, }) assertPackAny(t, map[[1]byte]bool{{42}: true}, []byte{ 6, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 42, 2, }) assertPackAnyPanic(t, map[struct{}]struct{}{{}: {}}, 9) assertPackAny(t, (*string)(nil), []byte{0}) assertPackAnyPanic(t, (*int)(nil), 0) assertPackAny(t, new(float64), []byte{3, 0, 0, 0, 0, 0, 0, 0, 0}) assertPackAny(t, "", []byte{4, 0, 0, 0, 0, 0, 0, 0, 0}) assertPackAny(t, "a", []byte{4, 0, 0, 0, 0, 0, 0, 0, 1, 'a'}) assertPackAny(t, "ä", []byte{4, 0, 0, 0, 0, 0, 0, 0, 2, 0xc3, 0xa4}) { var binary [256]byte for i := range binary { binary[i] = byte(i) } assertPackAny(t, binary, append([]byte{4, 0, 0, 0, 0, 0, 0, 1, 0}, binary[:]...)) assertPackAny(t, binary[:], append([]byte{4, 0, 0, 0, 0, 0, 0, 1, 0}, binary[:]...)) assertPackAny(t, types.Binary(binary[:]), append([]byte{4, 0, 0, 0, 0, 0, 0, 1, 0}, binary[:]...)) } { type myByte byte assertPackAnyPanic(t, []myByte(nil), 9) } assertPackAnyPanic(t, complex64(0+0i), 0) assertPackAnyPanic(t, 0+0i, 0) 
assertPackAnyPanic(t, make(chan struct{}), 0) assertPackAnyPanic(t, func() {}, 0) assertPackAnyPanic(t, struct{}{}, 0) assertPackAnyPanic(t, uintptr(0), 0) } func assertPackAny(t *testing.T, in interface{}, out []byte) { t.Helper() { buf := &bytes.Buffer{} if err := PackAny(in, buf); err == nil { if !bytes.Equal(buf.Bytes(), out) { t.Errorf("buf := &bytes.Buffer{}; packAny(%#v, buf); !bytes.Equal(buf.Bytes(), %#v)", in, out) } } else { t.Errorf("packAny(%#v, &bytes.Buffer{}) != nil", in) } } for i := 0; i < len(out); i++ { if !errors.Is(PackAny(in, &limitedWriter{i}), io.EOF) { t.Errorf("packAny(%#v, &limitedWriter{%d}) != io.EOF", in, i) } } } func assertPackAnyPanic(t *testing.T, in interface{}, allowToWrite int) { t.Helper() for i := 0; i < allowToWrite; i++ { if !errors.Is(PackAny(in, &limitedWriter{i}), io.EOF) { t.Errorf("packAny(%#v, &limitedWriter{%d}) != io.EOF", in, i) } } defer func() { t.Helper() if r := recover(); r == nil { t.Errorf("packAny(%#v, &limitedWriter{%d}) didn't panic", in, allowToWrite) } }() _ = PackAny(in, &limitedWriter{allowToWrite}) } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/objectpacker/objectpacker.go0000644000000000000000000001364315024302466030635 0ustar rootrootpackage objectpacker import ( "bytes" "encoding/binary" "fmt" "github.com/pkg/errors" "io" "reflect" "sort" ) // MustPackSlice calls PackAny using items and panics if there was an error. func MustPackSlice(items ...interface{}) []byte { var buf bytes.Buffer if err := PackAny(items, &buf); err != nil { panic(err) } return buf.Bytes() } // PackAny packs any JSON-encodable value (ex. structs, also ignores interfaces like encoding.TextMarshaler) // to a BSON-similar format suitable for consistent hashing. Spec: // // PackAny(nil) => 0x0 // PackAny(false) => 0x1 // PackAny(true) => 0x2 // PackAny(float64(42)) => 0x3 ieee754_binary64_bigendian(42) // PackAny("exämple") => 0x4 uint64_bigendian(len([]byte("exämple"))) []byte("exämple") // PackAny([]uint8{0x42}) => 0x4 uint64_bigendian(len([]uint8{0x42})) []uint8{0x42} // PackAny([1]uint8{0x42}) => 0x4 uint64_bigendian(len([1]uint8{0x42})) [1]uint8{0x42} // PackAny([]T{x,y}) => 0x5 uint64_bigendian(len([]T{x,y})) PackAny(x) PackAny(y) // PackAny(map[K]V{x:y}) => 0x6 uint64_bigendian(len(map[K]V{x:y})) len(map_key(x)) map_key(x) PackAny(y) // PackAny((*T)(nil)) => 0x0 // PackAny((*T)(0x42)) => PackAny(*(*T)(0x42)) // PackAny(x) => panic() // // map_key([1]uint8{0x42}) => [1]uint8{0x42} // map_key(x) => []byte(fmt.Sprint(x)) func PackAny(in interface{}, out io.Writer) error { return errors.Wrapf(packValue(reflect.ValueOf(in), out), "can't pack %#v", in) } var tByte = reflect.TypeOf(byte(0)) var tBytes = reflect.TypeOf([]uint8(nil)) // packValue does the actual job of packAny and just exists for recursion w/o unnecessary reflect.ValueOf calls. func packValue(in reflect.Value, out io.Writer) error { switch kind := in.Kind(); kind { case reflect.Invalid: // nil _, err := out.Write([]byte{0}) return err case reflect.Bool: if in.Bool() { _, err := out.Write([]byte{2}) return err } else { _, err := out.Write([]byte{1}) return err } case reflect.Float64: if _, err := out.Write([]byte{3}); err != nil { return err } return binary.Write(out, binary.BigEndian, in.Float()) case reflect.Array, reflect.Slice: if typ := in.Type(); typ.Elem() == tByte { if kind == reflect.Array { if !in.CanAddr() { vNewElem := reflect.New(typ).Elem() vNewElem.Set(in) in = vNewElem } in = in.Slice(0, in.Len()) } // Pack []byte as string, not array of numbers. 
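			// At this point both [N]byte (re-sliced above) and []byte arrive as a
			// slice; converting to []uint8 lets them share packString's 0x4 string
			// encoding, matching the PackAny spec entries for []uint8 and [1]uint8.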
b, ok := in.Convert(tBytes).Interface().([]uint8) if !ok { return fmt.Errorf("cannot convert data to []uint8") } return packString(b, out) } if _, err := out.Write([]byte{5}); err != nil { return err } l := in.Len() // #nosec G115 -- in.Len() is a non-negative number, thus cannot overflow for conversion to uint64 if err := binary.Write(out, binary.BigEndian, uint64(l)); err != nil { return err } for i := 0; i < l; i++ { if err := packValue(in.Index(i), out); err != nil { return err } } // If there aren't any values to pack, ... if l < 1 { // ... create one and pack it - panics on disallowed type. _ = packValue(reflect.Zero(in.Type().Elem()), io.Discard) } return nil case reflect.Interface: return packValue(in.Elem(), out) case reflect.Map: type kv struct { key []byte value reflect.Value } if _, err := out.Write([]byte{6}); err != nil { return err } l := in.Len() // #nosec G115 -- in.Len() is a non-negative number, thus cannot overflow for conversion to uint64 if err := binary.Write(out, binary.BigEndian, uint64(l)); err != nil { return err } sorted := make([]kv, 0, l) { iter := in.MapRange() for iter.Next() { var packedKey []byte if key := iter.Key(); key.Kind() == reflect.Array { if typ := key.Type(); typ.Elem() == tByte { if !key.CanAddr() { vNewElem := reflect.New(typ).Elem() vNewElem.Set(key) key = vNewElem } p, ok := key.Slice(0, key.Len()).Interface().([]byte) if !ok { return fmt.Errorf("cannot convert data to []byte") } packedKey = p } else { // Not just stringify the key (below), but also pack it (here) - panics on disallowed type. _ = packValue(iter.Key(), io.Discard) packedKey = []byte(fmt.Sprint(key.Interface())) } } else { // Not just stringify the key (below), but also pack it (here) - panics on disallowed type. _ = packValue(iter.Key(), io.Discard) packedKey = []byte(fmt.Sprint(key.Interface())) } sorted = append(sorted, kv{packedKey, iter.Value()}) } } sort.Slice(sorted, func(i, j int) bool { return bytes.Compare(sorted[i].key, sorted[j].key) < 0 }) for _, kv := range sorted { if err := binary.Write(out, binary.BigEndian, uint64(len(kv.key))); err != nil { return err } if _, err := out.Write(kv.key); err != nil { return err } if err := packValue(kv.value, out); err != nil { return err } } // If there aren't any key-value pairs to pack, ... if l < 1 { typ := in.Type() // ... create one and pack it - panics on disallowed type. _ = packValue(reflect.Zero(typ.Key()), io.Discard) _ = packValue(reflect.Zero(typ.Elem()), io.Discard) } return nil case reflect.Ptr: if in.IsNil() { err := packValue(reflect.Value{}, out) // Create a fictive referenced value and pack it - panics on disallowed type. _ = packValue(reflect.Zero(in.Type().Elem()), io.Discard) return err } else { return packValue(in.Elem(), out) } case reflect.String: return packString([]byte(in.String()), out) default: panic("bad type: " + in.Kind().String()) } } // packString deduplicates string packing of multiple locations in packValue. 
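//
// The layout matches the 0x4 case of the PackAny spec, e.g. (illustrative):
//
//	_ = packString([]byte("a"), out)
//	// out now ends with: 0x4, uint64 big-endian length 1, 'a'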
func packString(in []byte, out io.Writer) error { if _, err := out.Write([]byte{4}); err != nil { return err } if err := binary.Write(out, binary.BigEndian, uint64(len(in))); err != nil { return err } _, err := out.Write(in) return err } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/backoff/0000755000000000000000000000000015024302466024602 5ustar rootrootdependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/backoff/backoff.go0000644000000000000000000000242115024302466026523 0ustar rootrootpackage backoff import ( "math/rand" "time" ) // Backoff returns the backoff duration for a specific retry attempt. type Backoff func(uint64) time.Duration // DefaultBackoff is our opinionated Backoff function for retry.WithBackoff - between 128ms and 1m. var DefaultBackoff = NewExponentialWithJitter(128*time.Millisecond, 1*time.Minute) // NewExponentialWithJitter returns an exponentially increasing [Backoff] implementation. // // The calculated [time.Duration] values are within [min, max], exponentially increasing and slightly randomized. // If min or max are zero or negative, they will default to 100ms and 10s, respectively. It panics if min >= max. func NewExponentialWithJitter(min, max time.Duration) Backoff { if min <= 0 { min = 100 * time.Millisecond } if max <= 0 { max = 10 * time.Second } if min >= max { panic("max must be greater than min") } return func(attempt uint64) time.Duration { e := min << attempt // If the bit shift already overflows, return max. if e < min { return max } // Introduce jitter. e <- [min/2, int64_max) e = e/2 + time.Duration(rand.Int63n(int64(e/2))) // #nosec G404 -- we don't need crypto/rand here though. // Remap e to [min, max]. if e < min { e = min } if e > max { e = max } return e } } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/backoff/backoff_test.go0000644000000000000000000000217315024302466027566 0ustar rootrootpackage backoff import ( "github.com/stretchr/testify/require" "math" "testing" "time" ) func TestNewExponentialWithJitter(t *testing.T) { tests := []struct { name string min time.Duration max time.Duration }{ {"defaults", 100 * time.Millisecond, 10 * time.Second}, {"small-values", time.Millisecond, time.Second}, {"huge-values", time.Minute, time.Hour}, {"small-range", time.Millisecond, 2 * time.Millisecond}, {"huge-range", time.Millisecond, time.Hour}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { r := NewExponentialWithJitter(tt.min, tt.max) require.Equal(t, tt.min, r(0)) require.Equal(t, tt.max, r(math.MaxUint64)) // Ensure that multiple calls don't breach the upper bound reachedMax := false for i := uint64(0); i < 1024; i++ { d := r(i) require.GreaterOrEqual(t, d, tt.min) require.LessOrEqual(t, d, tt.max) require.Falsef(t, reachedMax && d != tt.max, "max value %v was already reached, but r(%d) := %v", tt.max, i, d) if d == tt.max { reachedMax = true } } require.True(t, reachedMax, "max value was never reached") }) } } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/go.mod0000644000000000000000000000215115024302466024314 0ustar rootrootmodule github.com/icinga/icinga-go-library go 1.23.0 require ( github.com/caarlos0/env/v11 v11.3.1 github.com/creasty/defaults v1.8.0 github.com/go-sql-driver/mysql v1.9.3 github.com/goccy/go-yaml v1.13.0 github.com/google/uuid v1.6.0 github.com/jessevdk/go-flags v1.6.1 github.com/jmoiron/sqlx v1.4.0 github.com/lib/pq v1.10.9 github.com/pkg/errors v0.9.1 github.com/redis/go-redis/v9 v9.10.0 github.com/ssgreg/journald v1.0.0 
github.com/stretchr/testify v1.10.0 go.uber.org/zap v1.27.0 golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 golang.org/x/sync v0.15.0 ) require ( filippo.io/edwards25519 v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/fatih/color v1.18.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.uber.org/multierr v1.10.0 // indirect golang.org/x/sys v0.26.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/retry/0000755000000000000000000000000015024302466024354 5ustar rootrootdependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/retry/retry.go0000644000000000000000000001456215024302466026060 0ustar rootrootpackage retry import ( "context" "database/sql/driver" "github.com/go-sql-driver/mysql" "github.com/icinga/icinga-go-library/backoff" "github.com/lib/pq" "github.com/pkg/errors" "io" "net" "strings" "syscall" "time" ) // DefaultTimeout is our opinionated default timeout for retrying database and Redis operations. const DefaultTimeout = 5 * time.Minute // RetryableFunc is a retryable function. type RetryableFunc func(context.Context) error // IsRetryable checks whether a new attempt can be started based on the error passed. type IsRetryable func(error) bool // OnRetryableErrorFunc is called if a retryable error occurs. type OnRetryableErrorFunc func(elapsed time.Duration, attempt uint64, err, lastErr error) // OnSuccessFunc is called once the operation succeeds. type OnSuccessFunc func(elapsed time.Duration, attempt uint64, lastErr error) // Settings aggregates optional settings for WithBackoff. type Settings struct { // If >0, Timeout lets WithBackoff stop retrying gracefully once elapsed based on the following criteria: // * If the execution of RetryableFunc has taken longer than Timeout, no further attempts are made. // * If Timeout elapses during the sleep phase between retries, one final retry is attempted. // * RetryableFunc is always granted its full execution time and is not canceled if it exceeds Timeout. // This means that WithBackoff may not stop exactly after Timeout expires, // or may not retry at all if the first execution of RetryableFunc already takes longer than Timeout. Timeout time.Duration OnRetryableError OnRetryableErrorFunc OnSuccess OnSuccessFunc } // WithBackoff retries the passed function if it fails and the error allows it to retry. // The specified backoff policy is used to determine how long to sleep between attempts. func WithBackoff( ctx context.Context, retryableFunc RetryableFunc, retryable IsRetryable, b backoff.Backoff, settings Settings, ) (err error) { // Channel for retry deadline, which is set to the channel of NewTimer() if a timeout is configured, // otherwise nil, so that it blocks forever if there is no timeout. var timeout <-chan time.Time if settings.Timeout > 0 { t := time.NewTimer(settings.Timeout) defer t.Stop() timeout = t.C } start := time.Now() timedOut := false for attempt := uint64(1); ; /* true */ attempt++ { prevErr := err if err = retryableFunc(ctx); err == nil { if settings.OnSuccess != nil { settings.OnSuccess(time.Since(start), attempt, prevErr) } return } // Retryable function may have exited prematurely due to context errors. 
// We explicitly check the context error here, as the error returned by the retryable function can pass the // error.Is() checks even though it is not a real context error, e.g. // https://cs.opensource.google/go/go/+/refs/tags/go1.22.2:src/net/net.go;l=422 // https://cs.opensource.google/go/go/+/refs/tags/go1.22.2:src/net/net.go;l=601 if errors.Is(ctx.Err(), context.DeadlineExceeded) || errors.Is(ctx.Err(), context.Canceled) { if prevErr != nil { err = errors.Wrap(err, prevErr.Error()) } return } if !retryable(err) { err = errors.Wrap(err, "can't retry") return } select { case <-timeout: // Stop retrying immediately if executing the retryable function took longer than the timeout. timedOut = true default: } if timedOut { err = errors.Wrap(err, "retry deadline exceeded") return } if settings.OnRetryableError != nil { settings.OnRetryableError(time.Since(start), attempt, err, prevErr) } select { case <-time.After(b(attempt)): case <-timeout: // Do not stop retrying immediately, but start one last attempt to mitigate timing issues where // the timeout expires while waiting for the next attempt and // therefore no retries have happened during this possibly long period. timedOut = true case <-ctx.Done(): err = errors.Wrap(ctx.Err(), err.Error()) return } } } // ResetTimeout changes the possibly expired timer t to expire after duration d. // // If the timer has already expired and nothing has been received from its channel, // it is automatically drained as if the timer had never expired. func ResetTimeout(t *time.Timer, d time.Duration) { if !t.Stop() { <-t.C } t.Reset(d) } // Retryable returns true for common errors that are considered retryable, // i.e. temporary, timeout, DNS, connection refused and reset, host down and unreachable and // network down and unreachable errors. In addition, any database error is considered retryable. func Retryable(err error) bool { var temporary interface { Temporary() bool } if errors.As(err, &temporary) && temporary.Temporary() { return true } var timeout interface { Timeout() bool } if errors.As(err, &timeout) && timeout.Timeout() { return true } var dnsError *net.DNSError if errors.As(err, &dnsError) { return true } var opError *net.OpError if errors.As(err, &opError) { // OpError provides Temporary() and Timeout(), but not Unwrap(), // so we have to extract the underlying error ourselves to also check for ECONNREFUSED, // which is not considered temporary or timed out by Go. err = opError.Err } if errors.Is(err, syscall.ECONNREFUSED) || errors.Is(err, syscall.ENOENT) { // syscall errors provide Temporary() and Timeout(), // which do not include ECONNREFUSED or ENOENT, so we check these ourselves. return true } if errors.Is(err, syscall.ECONNRESET) { // ECONNRESET is treated as a temporary error by Go only if it comes from calling accept. return true } if errors.Is(err, syscall.EHOSTDOWN) || errors.Is(err, syscall.EHOSTUNREACH) { return true } if errors.Is(err, syscall.ENETDOWN) || errors.Is(err, syscall.ENETUNREACH) { return true } if errors.Is(err, syscall.EPIPE) { return true } if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { return true } if errors.Is(err, driver.ErrBadConn) { return true } if errors.Is(err, mysql.ErrInvalidConn) { return true } var mye *mysql.MySQLError var pqe *pq.Error if errors.As(err, &mye) || errors.As(err, &pqe) { return true } // For errors without a five-digit code, github.com/lib/pq uses fmt.Errorf(). 
// This returns an unexported error type prefixed with "pq: " // Until this gets changed upstream , we can only check the error message. if strings.HasPrefix(err.Error(), "pq: ") { return true } return false } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/types/0000755000000000000000000000000015024302466024353 5ustar rootrootdependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/types/unix_milli_test.go0000644000000000000000000000657615024302466030130 0ustar rootrootpackage types import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "math" "testing" "time" "unicode/utf8" ) func TestUnixMilli(t *testing.T) { type testCase struct { v UnixMilli json string text string } tests := map[string]testCase{ "Zero": {UnixMilli{}, "null", ""}, "Non-zero": {UnixMilli(time.Unix(1234567890, 0)), "1234567890000", "1234567890000"}, "Epoch": {UnixMilli(time.Unix(0, 0)), "0", "0"}, "With milliseconds": {UnixMilli(time.Unix(1234567890, 62000000)), "1234567890062", "1234567890062"}, } var runTests = func(t *testing.T, f func(*testing.T, testCase)) { for name, test := range tests { t.Run(name, func(t *testing.T) { f(t, test) }) } } t.Run("MarshalJSON", func(t *testing.T) { runTests(t, func(t *testing.T, test testCase) { actual, err := test.v.MarshalJSON() require.NoError(t, err) require.True(t, utf8.Valid(actual)) require.Equal(t, test.json, string(actual)) }) }) t.Run("UnmarshalJSON", func(t *testing.T) { runTests(t, func(t *testing.T, test testCase) { var actual UnixMilli err := actual.UnmarshalJSON([]byte(test.json)) require.NoError(t, err) require.Equal(t, test.v, actual) }) }) t.Run("MarshalText", func(t *testing.T) { runTests(t, func(t *testing.T, test testCase) { actual, err := test.v.MarshalText() require.NoError(t, err) require.True(t, utf8.Valid(actual)) require.Equal(t, test.text, string(actual)) }) }) t.Run("UnmarshalText", func(t *testing.T) { runTests(t, func(t *testing.T, test testCase) { var actual UnixMilli err := actual.UnmarshalText([]byte(test.text)) require.NoError(t, err) require.Equal(t, test.v, actual) }) }) } func TestUnixMilli_Scan(t *testing.T) { tests := []struct { name string v any expected UnixMilli expectErr bool }{ { name: "Nil", v: nil, expected: UnixMilli{}, }, { name: "Epoch", v: int64(0), expected: UnixMilli(time.Unix(0, 0)), }, { name: "bytes", v: []byte("1234567890062"), expected: UnixMilli(time.Unix(1234567890, 62000000)), }, { name: "Invalid bytes", v: []byte("invalid"), expectErr: true, }, { name: "int64", v: int64(1234567890062), expected: UnixMilli(time.Unix(1234567890, 62000000)), }, { name: "uint64", v: uint64(1234567890062), expected: UnixMilli(time.Unix(1234567890, 62000000)), }, { name: "uint64 out of range for int64", v: uint64(math.MaxInt64) + 1, expectErr: true, }, { name: "Invalid type", v: "invalid", expectErr: true, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { var actual UnixMilli err := actual.Scan(test.v) if test.expectErr { assert.Error(t, err) } else { assert.NoError(t, err) assert.Equal(t, test.expected, actual) } }) } } func TestUnixMilli_Value(t *testing.T) { t.Run("Zero", func(t *testing.T) { var zero UnixMilli actual, err := zero.Value() require.NoError(t, err) require.Nil(t, actual) }) t.Run("Non-zero", func(t *testing.T) { withMilliseconds := time.Unix(1234567890, 62000000) expected := withMilliseconds.UnixMilli() actual, err := UnixMilli(withMilliseconds).Value() assert.NoError(t, err) assert.Equal(t, expected, actual) }) } 
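// A hedged sketch of how the retry and backoff packages above are meant to be
// combined; ctx and doQuery are assumptions made purely for illustration:
//
//	err := retry.WithBackoff(
//		ctx,
//		func(ctx context.Context) error { return doQuery(ctx) },
//		retry.Retryable,
//		backoff.NewExponentialWithJitter(128*time.Millisecond, time.Minute),
//		retry.Settings{
//			Timeout: retry.DefaultTimeout,
//			OnRetryableError: func(elapsed time.Duration, attempt uint64, err, lastErr error) {
//				// e.g. emit a log line about the transient failure
//			},
//		},
//	)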
dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/types/uuid_test.go0000644000000000000000000000100015024302466026676 0ustar rootrootpackage types import ( "github.com/google/uuid" "github.com/stretchr/testify/require" "testing" ) func TestUUID_Value(t *testing.T) { nonzero := uuid.New() subtests := []struct { name string input uuid.UUID output []byte }{ {"zero", uuid.UUID{}, make([]byte, 16)}, {"nonzero", nonzero, nonzero[:]}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { actual, err := UUID{st.input}.Value() require.NoError(t, err) require.Equal(t, st.output, actual) }) } } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/types/unix_milli.go0000644000000000000000000000526415024302466027062 0ustar rootrootpackage types import ( "bytes" "database/sql" "database/sql/driver" "encoding" "encoding/json" "github.com/pkg/errors" "math" "strconv" "time" ) // UnixMilli is a nullable millisecond UNIX timestamp in databases and JSON. type UnixMilli time.Time // Time returns the time.Time conversion of UnixMilli. func (t UnixMilli) Time() time.Time { return time.Time(t) } // MarshalJSON implements the json.Marshaler interface. // Marshals to milliseconds. Supports JSON null. func (t UnixMilli) MarshalJSON() ([]byte, error) { if time.Time(t).IsZero() { return []byte("null"), nil } return []byte(strconv.FormatInt(t.Time().UnixMilli(), 10)), nil } // UnmarshalJSON implements the json.Unmarshaler interface. // Unmarshals from milliseconds. Supports JSON null. func (t *UnixMilli) UnmarshalJSON(data []byte) error { if bytes.Equal(data, []byte("null")) || len(data) == 0 { return nil } return t.fromByteString(data) } // MarshalText implements the encoding.TextMarshaler interface. func (t UnixMilli) MarshalText() ([]byte, error) { if time.Time(t).IsZero() { return []byte{}, nil } return []byte(strconv.FormatInt(t.Time().UnixMilli(), 10)), nil } // UnmarshalText implements the encoding.TextUnmarshaler interface. func (t *UnixMilli) UnmarshalText(text []byte) error { if len(text) == 0 { return nil } return t.fromByteString(text) } // Scan implements the sql.Scanner interface. // Scans from milliseconds. Supports SQL NULL. func (t *UnixMilli) Scan(src interface{}) error { if src == nil { return nil } switch v := src.(type) { case []byte: return t.fromByteString(v) // https://github.com/go-sql-driver/mysql/pull/1452 case uint64: if v > math.MaxInt64 { return errors.Errorf("value %v out of range for int64", v) } *t = UnixMilli(time.UnixMilli(int64(v))) case int64: *t = UnixMilli(time.UnixMilli(v)) default: return errors.Errorf("bad (u)int64/[]byte type assertion from %[1]v (%[1]T)", src) } return nil } // Value implements the driver.Valuer interface. // Returns milliseconds. Supports SQL NULL. func (t UnixMilli) Value() (driver.Value, error) { if t.Time().IsZero() { return nil, nil } return t.Time().UnixMilli(), nil } func (t *UnixMilli) fromByteString(data []byte) error { i, err := strconv.ParseInt(string(data), 10, 64) if err != nil { return CantParseInt64(err, string(data)) } *t = UnixMilli(time.UnixMilli(i)) return nil } // Assert interface compliance. 
var ( _ encoding.TextMarshaler = UnixMilli{} _ encoding.TextUnmarshaler = (*UnixMilli)(nil) _ json.Marshaler = UnixMilli{} _ json.Unmarshaler = (*UnixMilli)(nil) _ driver.Valuer = UnixMilli{} _ sql.Scanner = (*UnixMilli)(nil) ) dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/types/utils_test.go0000644000000000000000000000077515024302466027112 0ustar rootrootpackage types import ( "fmt" "github.com/stretchr/testify/require" "os" "testing" ) func TestName(t *testing.T) { subtests := []struct { name string input any output string }{ {"nil", nil, ""}, {"simple", 1, "int"}, {"pointer", (*int)(nil), "int"}, {"package", os.FileMode(0), "FileMode"}, {"pointer_package", (*fmt.Formatter)(nil), "Formatter"}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { require.Equal(t, st.output, Name(st.input)) }) } } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/types/int_test.go0000644000000000000000000000632015024302466026534 0ustar rootrootpackage types import ( "database/sql" "github.com/stretchr/testify/require" "testing" ) func TestMakeInt(t *testing.T) { subtests := []struct { name string input int64 transformers []func(*Int) output sql.NullInt64 }{ { name: "zero", input: 0, output: sql.NullInt64{Int64: 0, Valid: true}, }, { name: "positive", input: 1, output: sql.NullInt64{Int64: 1, Valid: true}, }, { name: "negative", input: -1, output: sql.NullInt64{Int64: -1, Valid: true}, }, { name: "zero-transform-zero-to-null", input: 0, transformers: []func(*Int){TransformZeroIntToNull}, output: sql.NullInt64{Valid: false}, }, { name: "positive-transform-zero-to-null", input: 1, transformers: []func(*Int){TransformZeroIntToNull}, output: sql.NullInt64{Int64: 1, Valid: true}, }, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { require.Equal(t, Int{NullInt64: st.output}, MakeInt(st.input, st.transformers...)) }) } } func TestInt_MarshalJSON(t *testing.T) { subtests := []struct { name string input sql.NullInt64 output string }{ {"null", sql.NullInt64{}, `null`}, {"invalid", sql.NullInt64{Int64: 42}, `null`}, {"zero", sql.NullInt64{Int64: 0, Valid: true}, `0`}, {"negative", sql.NullInt64{Int64: -1, Valid: true}, `-1`}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { actual, err := Int{st.input}.MarshalJSON() require.NoError(t, err) require.Equal(t, st.output, string(actual)) }) } } func TestInt_UnmarshalText(t *testing.T) { subtests := []struct { name string input string output sql.NullInt64 error bool }{ {"empty", "", sql.NullInt64{}, true}, {"2p64", "18446744073709551616", sql.NullInt64{}, true}, {"float", "0.0", sql.NullInt64{}, true}, {"zero", "0", sql.NullInt64{Int64: 0, Valid: true}, false}, {"negative", "-1", sql.NullInt64{Int64: -1, Valid: true}, false}, {"2p62", "4611686018427387904", sql.NullInt64{Int64: 1 << 62, Valid: true}, false}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { var actual Int if err := actual.UnmarshalText([]byte(st.input)); st.error { require.Error(t, err) } else { require.NoError(t, err) require.Equal(t, Int{NullInt64: st.output}, actual) } }) } } func TestInt_UnmarshalJSON(t *testing.T) { subtests := []struct { name string input string output sql.NullInt64 error bool }{ {"null", `null`, sql.NullInt64{}, false}, {"bool", `false`, sql.NullInt64{}, true}, {"2p64", `18446744073709551616`, sql.NullInt64{}, true}, {"float", `0.0`, sql.NullInt64{}, true}, {"string", `"0"`, sql.NullInt64{}, true}, {"zero", `0`, sql.NullInt64{Int64: 0, Valid: true}, false}, {"negative", `-1`, 
sql.NullInt64{Int64: -1, Valid: true}, false}, {"2p62", `4611686018427387904`, sql.NullInt64{Int64: 1 << 62, Valid: true}, false}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { var actual Int if err := actual.UnmarshalJSON([]byte(st.input)); st.error { require.Error(t, err) } else { require.NoError(t, err) require.Equal(t, Int{NullInt64: st.output}, actual) } }) } } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/types/bool.go0000644000000000000000000000364715024302466025647 0ustar rootrootpackage types import ( "database/sql" "database/sql/driver" "encoding" "encoding/json" "github.com/pkg/errors" "strconv" ) var ( enum = map[bool]string{ true: "y", false: "n", } ) // Bool represents a bool for ENUM ('y', 'n'), which can be NULL. type Bool struct { Bool bool Valid bool // Valid is true if Bool is not NULL } // MarshalJSON implements the json.Marshaler interface. func (b Bool) MarshalJSON() ([]byte, error) { if !b.Valid { return []byte("null"), nil } return MarshalJSON(b.Bool) } // UnmarshalText implements the encoding.TextUnmarshaler interface. func (b *Bool) UnmarshalText(text []byte) error { parsed, err := strconv.ParseUint(string(text), 10, 64) if err != nil { return CantParseUint64(err, string(text)) } *b = Bool{parsed != 0, true} return nil } // UnmarshalJSON implements the json.Unmarshaler interface. func (b *Bool) UnmarshalJSON(data []byte) error { if string(data) == "null" || len(data) == 0 { return nil } if err := UnmarshalJSON(data, &b.Bool); err != nil { return err } b.Valid = true return nil } // Scan implements the sql.Scanner interface. // Supports SQL NULL. func (b *Bool) Scan(src interface{}) error { if src == nil { b.Bool, b.Valid = false, false return nil } v, ok := src.([]byte) if !ok { return errors.Errorf("bad []byte type assertion from %#v", src) } switch string(v) { case "y": b.Bool = true case "n": b.Bool = false default: return errors.Errorf("bad bool %#v", v) } b.Valid = true return nil } // Value implements the driver.Valuer interface. // Supports SQL NULL. func (b Bool) Value() (driver.Value, error) { if !b.Valid { return nil, nil } return enum[b.Bool], nil } // Assert interface compliance. 
var ( _ json.Marshaler = Bool{} _ encoding.TextUnmarshaler = (*Bool)(nil) _ json.Unmarshaler = (*Bool)(nil) _ sql.Scanner = (*Bool)(nil) _ driver.Valuer = Bool{} ) dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/types/float_test.go0000644000000000000000000000504015024302466027045 0ustar rootrootpackage types import ( "database/sql" "github.com/stretchr/testify/require" "testing" ) func TestFloat_MarshalJSON(t *testing.T) { subtests := []struct { name string input sql.NullFloat64 output string }{ {"null", sql.NullFloat64{}, `null`}, {"invalid", sql.NullFloat64{Float64: 42}, `null`}, {"zero", sql.NullFloat64{Float64: 0, Valid: true}, `0`}, {"negative", sql.NullFloat64{Float64: -1, Valid: true}, `-1`}, {"fraction", sql.NullFloat64{Float64: 0.5, Valid: true}, `0.5`}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { actual, err := Float{st.input}.MarshalJSON() require.NoError(t, err) require.Equal(t, st.output, string(actual)) }) } } func TestFloat_UnmarshalText(t *testing.T) { subtests := []struct { name string input string output sql.NullFloat64 error bool }{ {"empty", "", sql.NullFloat64{}, true}, {"too_big", "1e309", sql.NullFloat64{}, true}, {"zero", "0", sql.NullFloat64{Float64: 0, Valid: true}, false}, {"negative", "-1", sql.NullFloat64{Float64: -1, Valid: true}, false}, {"fraction", "0.5", sql.NullFloat64{Float64: 0.5, Valid: true}, false}, {"exp", "2e1", sql.NullFloat64{Float64: 20, Valid: true}, false}, {"too_precise", "1e-1337", sql.NullFloat64{Float64: 0, Valid: true}, false}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { var actual Float if err := actual.UnmarshalText([]byte(st.input)); st.error { require.Error(t, err) } else { require.NoError(t, err) require.Equal(t, Float{NullFloat64: st.output}, actual) } }) } } func TestFloat_UnmarshalJSON(t *testing.T) { subtests := []struct { name string input string output sql.NullFloat64 error bool }{ {"null", `null`, sql.NullFloat64{}, false}, {"bool", `false`, sql.NullFloat64{}, true}, {"string", `"0"`, sql.NullFloat64{}, true}, {"too_big", `1e309`, sql.NullFloat64{}, true}, {"zero", `0`, sql.NullFloat64{Float64: 0, Valid: true}, false}, {"negative", `-1`, sql.NullFloat64{Float64: -1, Valid: true}, false}, {"fraction", `0.5`, sql.NullFloat64{Float64: 0.5, Valid: true}, false}, {"exp", `2e1`, sql.NullFloat64{Float64: 20, Valid: true}, false}, {"too_precise", `1e-1337`, sql.NullFloat64{Float64: 0, Valid: true}, false}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { var actual Float if err := actual.UnmarshalJSON([]byte(st.input)); st.error { require.Error(t, err) } else { require.NoError(t, err) require.Equal(t, Float{NullFloat64: st.output}, actual) } }) } } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/types/float.go0000644000000000000000000000245415024302466026014 0ustar rootrootpackage types import ( "bytes" "database/sql" "database/sql/driver" "encoding" "encoding/json" "strconv" ) // Float adds JSON support to sql.NullFloat64. type Float struct { sql.NullFloat64 } // MarshalJSON implements the json.Marshaler interface. // Supports JSON null. func (f Float) MarshalJSON() ([]byte, error) { var v interface{} if f.Valid { v = f.Float64 } return MarshalJSON(v) } // UnmarshalText implements the encoding.TextUnmarshaler interface. 
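// It accepts anything strconv.ParseFloat accepts, e.g. (illustrative):
//
//	var f Float
//	_ = f.UnmarshalText([]byte("0.5")) // f.Valid == true, f.Float64 == 0.5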
func (f *Float) UnmarshalText(text []byte) error { parsed, err := strconv.ParseFloat(string(text), 64) if err != nil { return CantParseFloat64(err, string(text)) } *f = Float{sql.NullFloat64{ Float64: parsed, Valid: true, }} return nil } // UnmarshalJSON implements the json.Unmarshaler interface. // Supports JSON null. func (f *Float) UnmarshalJSON(data []byte) error { // Ignore null, like in the main JSON package. if bytes.HasPrefix(data, []byte{'n'}) { return nil } if err := UnmarshalJSON(data, &f.Float64); err != nil { return err } f.Valid = true return nil } // Assert interface compliance. var ( _ json.Marshaler = Float{} _ encoding.TextUnmarshaler = (*Float)(nil) _ json.Unmarshaler = (*Float)(nil) _ driver.Valuer = Float{} _ sql.Scanner = (*Float)(nil) ) dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/types/string.go0000644000000000000000000000403015024302466026205 0ustar rootrootpackage types import ( "bytes" "database/sql" "database/sql/driver" "encoding" "encoding/json" "strings" ) // String adds JSON support to sql.NullString. type String struct { sql.NullString } // TransformEmptyStringToNull transforms a valid String carrying an empty text to a SQL NULL. func TransformEmptyStringToNull(s *String) { if s.Valid && s.String == "" { s.Valid = false } } // MakeString constructs a new String. // // Multiple transformer functions can be given, each transforming the generated String, e.g., TransformEmptyStringToNull. func MakeString(in string, transformers ...func(*String)) String { s := String{sql.NullString{ String: in, Valid: true, }} for _, transformer := range transformers { transformer(&s) } return s } // MarshalJSON implements the json.Marshaler interface. // Supports JSON null. func (s String) MarshalJSON() ([]byte, error) { var v interface{} if s.Valid { v = s.String } return MarshalJSON(v) } // UnmarshalText implements the encoding.TextUnmarshaler interface. func (s *String) UnmarshalText(text []byte) error { *s = String{sql.NullString{ String: string(text), Valid: true, }} return nil } // UnmarshalJSON implements the json.Unmarshaler interface. // Supports JSON null. func (s *String) UnmarshalJSON(data []byte) error { // Ignore null, like in the main JSON package. if bytes.HasPrefix(data, []byte{'n'}) { return nil } if err := UnmarshalJSON(data, &s.String); err != nil { return err } s.Valid = true return nil } // Value implements the driver.Valuer interface. // Supports SQL NULL. func (s String) Value() (driver.Value, error) { if !s.Valid { return nil, nil } // PostgreSQL does not allow null bytes in varchar, char and text fields. return strings.ReplaceAll(s.String, "\x00", ""), nil } // Assert interface compliance. 
var ( _ json.Marshaler = String{} _ encoding.TextUnmarshaler = (*String)(nil) _ json.Unmarshaler = (*String)(nil) _ driver.Valuer = String{} _ sql.Scanner = (*String)(nil) ) dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/types/bool_test.go0000644000000000000000000000613015024302466026674 0ustar rootrootpackage types import ( "fmt" "github.com/stretchr/testify/require" "testing" "unicode/utf8" ) func TestBool_MarshalJSON(t *testing.T) { subtests := []struct { input Bool output string }{ {Bool{Bool: false, Valid: false}, `null`}, {Bool{Bool: false, Valid: true}, `false`}, {Bool{Bool: true, Valid: false}, `null`}, {Bool{Bool: true, Valid: true}, `true`}, } for _, st := range subtests { t.Run(fmt.Sprintf("Bool-%#v_Valid-%#v", st.input.Bool, st.input.Valid), func(t *testing.T) { actual, err := st.input.MarshalJSON() require.NoError(t, err) require.True(t, utf8.Valid(actual)) require.Equal(t, st.output, string(actual)) }) } } func TestBool_UnmarshalText(t *testing.T) { subtests := []struct { name string input string output Bool error bool }{ {"empty", "", Bool{}, true}, {"negative", "-1", Bool{}, true}, {"bool", "false", Bool{}, true}, {"b", "f", Bool{}, true}, {"float", "0.0", Bool{}, true}, {"zero", "0", Bool{Bool: false, Valid: true}, false}, {"one", "1", Bool{Bool: true, Valid: true}, false}, {"two", "2", Bool{Bool: true, Valid: true}, false}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { var actual Bool if err := actual.UnmarshalText([]byte(st.input)); st.error { require.Error(t, err) } else { require.NoError(t, err) require.Equal(t, st.output, actual) } }) } } func TestBool_UnmarshalJSON(t *testing.T) { subtests := []struct { name string input string output Bool error bool }{ {"null", `null`, Bool{}, false}, {"false", `false`, Bool{Bool: false, Valid: true}, false}, {"true", `true`, Bool{Bool: true, Valid: true}, false}, {"number", `0`, Bool{}, true}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { var actual Bool if err := actual.UnmarshalJSON([]byte(st.input)); st.error { require.Error(t, err) } else { require.NoError(t, err) require.Equal(t, st.output, actual) } }) } } func TestBool_Scan(t *testing.T) { subtests := []struct { name string input any output Bool error bool }{ {"nil", nil, Bool{}, false}, {"bool", false, Bool{}, true}, {"int64", int64(0), Bool{}, true}, {"string", "false", Bool{}, true}, {"n", []byte("n"), Bool{Bool: false, Valid: true}, false}, {"y", []byte("y"), Bool{Bool: true, Valid: true}, false}, {"invalid", []byte("false"), Bool{}, true}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { var actual Bool if err := actual.Scan(st.input); st.error { require.Error(t, err) } else { require.NoError(t, err) require.Equal(t, st.output, actual) } }) } } func TestBool_Value(t *testing.T) { subtests := []struct { name string input Bool output any }{ {"nil", Bool{}, nil}, {"invalid", Bool{Bool: true, Valid: false}, nil}, {"false", Bool{Bool: false, Valid: true}, "n"}, {"true", Bool{Bool: true, Valid: true}, "y"}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { actual, err := st.input.Value() require.NoError(t, err) require.Equal(t, st.output, actual) }) } } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/types/string_test.go0000644000000000000000000000763115024302466027256 0ustar rootrootpackage types import ( "database/sql" "github.com/stretchr/testify/require" "testing" "unicode/utf8" ) func TestMakeString(t *testing.T) { subtests := []struct { name string input 
string transformers []func(*String) output sql.NullString }{ { name: "empty", input: "", output: sql.NullString{String: "", Valid: true}, }, { name: "nul", input: "\x00", output: sql.NullString{String: "\x00", Valid: true}, }, { name: "space", input: " ", output: sql.NullString{String: " ", Valid: true}, }, { name: "valid-text", input: "abc", output: sql.NullString{String: "abc", Valid: true}, }, { name: "empty-transform-empty-to-null", input: "", transformers: []func(*String){TransformEmptyStringToNull}, output: sql.NullString{Valid: false}, }, { name: "valid-text-transform-empty-to-null", input: "abc", transformers: []func(*String){TransformEmptyStringToNull}, output: sql.NullString{String: "abc", Valid: true}, }, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { require.Equal(t, String{NullString: st.output}, MakeString(st.input, st.transformers...)) }) } } func TestString_MarshalJSON(t *testing.T) { subtests := []struct { name string input sql.NullString output string }{ {"null", sql.NullString{}, `null`}, {"invalid", sql.NullString{String: "abc"}, `null`}, {"empty", sql.NullString{String: "", Valid: true}, `""`}, {"nul", sql.NullString{String: "\x00", Valid: true}, `"\u0000"`}, {"space", sql.NullString{String: " ", Valid: true}, `" "`}, {"multiple", sql.NullString{String: "abc", Valid: true}, `"abc"`}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { actual, err := String{st.input}.MarshalJSON() require.NoError(t, err) require.True(t, utf8.Valid(actual)) require.Equal(t, st.output, string(actual)) }) } } func TestString_UnmarshalText(t *testing.T) { subtests := []struct { name string io string }{ {"empty", ""}, {"nul", "\x00"}, {"space", " "}, {"multiple", "abc"}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { var actual String require.NoError(t, actual.UnmarshalText([]byte(st.io))) require.Equal(t, String{NullString: sql.NullString{String: st.io, Valid: true}}, actual) }) } } func TestString_UnmarshalJSON(t *testing.T) { subtests := []struct { name string input string output sql.NullString error bool }{ {"null", `null`, sql.NullString{}, false}, {"bool", `false`, sql.NullString{}, true}, {"number", `0`, sql.NullString{}, true}, {"empty", `""`, sql.NullString{String: "", Valid: true}, false}, {"nul", `"\u0000"`, sql.NullString{String: "\x00", Valid: true}, false}, {"space", `" "`, sql.NullString{String: " ", Valid: true}, false}, {"multiple", `"abc"`, sql.NullString{String: "abc", Valid: true}, false}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { var actual String if err := actual.UnmarshalJSON([]byte(st.input)); st.error { require.Error(t, err) } else { require.NoError(t, err) require.Equal(t, String{NullString: st.output}, actual) } }) } } func TestString_Value(t *testing.T) { subtests := []struct { name string input sql.NullString output any }{ {"nil", sql.NullString{}, nil}, {"invalid", sql.NullString{String: "abc"}, nil}, {"empty", sql.NullString{String: "", Valid: true}, ""}, {"nul", sql.NullString{String: "\x00", Valid: true}, ""}, {"space", sql.NullString{String: " ", Valid: true}, " "}, {"multiple", sql.NullString{String: "abc", Valid: true}, "abc"}, {"nuls", sql.NullString{String: "\x00 \x00", Valid: true}, " "}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { actual, err := String{st.input}.Value() require.NoError(t, err) require.Equal(t, st.output, actual) }) } } 
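// Editor's note: the snippet below is an illustrative usage sketch added for this document;
// it is not part of the vendored icinga-go-library module. It assumes the import path
// github.com/icinga/icinga-go-library/types and exercises only the String API shown above
// (MakeString, TransformEmptyStringToNull, MarshalJSON, Value).
package main

import (
	"fmt"

	"github.com/icinga/icinga-go-library/types"
)

func main() {
	// A non-empty input produces a valid sql.NullString and marshals to a JSON string.
	s := types.MakeString("abc")
	if out, err := s.MarshalJSON(); err == nil {
		fmt.Println(string(out)) // "abc"
	}

	// With TransformEmptyStringToNull, an empty input becomes SQL NULL and JSON null.
	empty := types.MakeString("", types.TransformEmptyStringToNull)
	if out, err := empty.MarshalJSON(); err == nil {
		fmt.Println(string(out)) // null
	}

	// Value strips NUL bytes, since PostgreSQL rejects them in text columns.
	if v, err := types.MakeString("a\x00b").Value(); err == nil {
		fmt.Println(v) // ab
	}
}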
dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/types/binary.go0000644000000000000000000000553715024302466026200 0ustar rootrootpackage types import ( "bytes" "database/sql" "database/sql/driver" "encoding" "encoding/hex" "encoding/json" "fmt" "github.com/pkg/errors" ) // Binary nullable byte string. Hex as JSON. type Binary []byte // nullBinary for validating whether a Binary is valid. var nullBinary Binary // Valid returns whether the Binary is valid. func (binary Binary) Valid() bool { return !bytes.Equal(binary, nullBinary) } // String returns the hex string representation form of the Binary. func (binary Binary) String() string { return hex.EncodeToString(binary) } // MarshalText implements a custom marhsal function to encode // the Binary as hex. MarshalText implements the // encoding.TextMarshaler interface. func (binary Binary) MarshalText() ([]byte, error) { return []byte(binary.String()), nil } // UnmarshalText implements a custom unmarshal function to decode // hex into a Binary. UnmarshalText implements the // encoding.TextUnmarshaler interface. func (binary *Binary) UnmarshalText(text []byte) error { b := make([]byte, hex.DecodedLen(len(text))) _, err := hex.Decode(b, text) if err != nil { return CantDecodeHex(err, string(text)) } *binary = b return nil } // MarshalJSON implements a custom marshal function to encode the Binary // as a hex string. MarshalJSON implements the json.Marshaler interface. // Supports JSON null. func (binary Binary) MarshalJSON() ([]byte, error) { if !binary.Valid() { return []byte("null"), nil } return MarshalJSON(binary.String()) } // UnmarshalJSON implements a custom unmarshal function to decode // a JSON hex string into a Binary. UnmarshalJSON implements the // json.Unmarshaler interface. Supports JSON null. func (binary *Binary) UnmarshalJSON(data []byte) error { if string(data) == "null" || len(data) == 0 { return nil } var s string if err := UnmarshalJSON(data, &s); err != nil { return err } b, err := hex.DecodeString(s) if err != nil { return CantDecodeHex(err, s) } *binary = b return nil } // Scan implements the sql.Scanner interface. // Supports SQL NULL. func (binary *Binary) Scan(src interface{}) error { switch src := src.(type) { case nil: return nil case []byte: if len(src) == 0 { return nil } b := make([]byte, len(src)) copy(b, src) *binary = b default: return errors.Errorf("unable to scan type %T into Binary", src) } return nil } // Value implements the driver.Valuer interface. // Supports SQL NULL. func (binary Binary) Value() (driver.Value, error) { if !binary.Valid() { return nil, nil } return []byte(binary), nil } // Assert interface compliance. var ( _ fmt.Stringer = Binary{} _ encoding.TextMarshaler = Binary{} _ encoding.TextUnmarshaler = (*Binary)(nil) _ json.Marshaler = Binary{} _ json.Unmarshaler = (*Binary)(nil) _ sql.Scanner = (*Binary)(nil) _ driver.Valuer = Binary{} ) dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/types/uuid.go0000644000000000000000000000073215024302466025652 0ustar rootrootpackage types import ( "database/sql/driver" "encoding" "github.com/google/uuid" ) // UUID is like uuid.UUID, but marshals itself binarily (not like xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) in SQL context. type UUID struct { uuid.UUID } // Value implements driver.Valuer. func (uuid UUID) Value() (driver.Value, error) { return uuid.UUID[:], nil } // Assert interface compliance. 
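// Editor's note: exampleBinaryRoundTrip below is an illustrative, editor-added sketch and is
// not part of the vendored module. It uses only identifiers already available in this package
// (Binary, driver.Value) and shows the behaviour documented above: hex strings in JSON and
// raw bytes towards the SQL driver, with a zero-length Binary mapping to SQL NULL.
func exampleBinaryRoundTrip() ([]byte, driver.Value, error) {
	b := Binary{0x01, 0xfe}

	// MarshalJSON encodes the bytes as a hex JSON string: `"01fe"`.
	asJSON, err := b.MarshalJSON()
	if err != nil {
		return nil, nil, err
	}

	// Value hands the raw bytes to the SQL driver; Binary(nil) would yield SQL NULL instead.
	asSQL, err := b.Value()
	return asJSON, asSQL, err
}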
var ( _ encoding.TextUnmarshaler = (*UUID)(nil) _ driver.Valuer = UUID{} ) dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/types/utils.go0000644000000000000000000000330115024302466026037 0ustar rootrootpackage types import ( "encoding/json" "fmt" "github.com/pkg/errors" "strings" ) // Name returns the declared name of type t. func Name(t any) string { s := strings.TrimLeft(fmt.Sprintf("%T", t), "*") return s[strings.LastIndex(s, ".")+1:] } // CantDecodeHex wraps the given error with the given string that cannot be hex-decoded. func CantDecodeHex(err error, s string) error { return errors.Wrapf(err, "can't decode hex %q", s) } // CantParseFloat64 wraps the given error with the specified string that cannot be parsed into float64. func CantParseFloat64(err error, s string) error { return errors.Wrapf(err, "can't parse %q into float64", s) } // CantParseInt64 wraps the given error with the specified string that cannot be parsed into int64. func CantParseInt64(err error, s string) error { return errors.Wrapf(err, "can't parse %q into int64", s) } // CantParseUint64 wraps the given error with the specified string that cannot be parsed into uint64. func CantParseUint64(err error, s string) error { return errors.Wrapf(err, "can't parse %q into uint64", s) } // CantUnmarshalYAML wraps the given error with the designated value, which cannot be unmarshalled into. func CantUnmarshalYAML(err error, v interface{}) error { return errors.Wrapf(err, "can't unmarshal YAML into %T", v) } // MarshalJSON calls json.Marshal and wraps any resulting errors. func MarshalJSON(v interface{}) ([]byte, error) { b, err := json.Marshal(v) return b, errors.Wrapf(err, "can't marshal JSON from %T", v) } // UnmarshalJSON calls json.Unmarshal and wraps any resulting errors. 
func UnmarshalJSON(data []byte, v interface{}) error { return errors.Wrapf(json.Unmarshal(data, v), "can't unmarshal JSON into %T", v) } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/types/binary_test.go0000644000000000000000000001022415024302466027224 0ustar rootrootpackage types import ( "github.com/stretchr/testify/require" "testing" "unicode/utf8" ) func TestBinary_Valid(t *testing.T) { subtests := []struct { name string input Binary output bool }{ {"nil", nil, false}, {"empty", make(Binary, 0, 1), false}, {"nul", Binary{0}, true}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { require.Equal(t, st.output, st.input.Valid()) }) } } func TestBinary_String(t *testing.T) { subtests := []struct { name string input Binary output string }{ {"nil", nil, ""}, {"nul", Binary{0}, "00"}, {"hex", Binary{10}, "0a"}, {"multiple", Binary{1, 254}, "01fe"}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { require.Equal(t, st.output, st.input.String()) }) } } func TestBinary_MarshalText(t *testing.T) { subtests := []struct { name string input Binary output string }{ {"nil", nil, ""}, {"nul", Binary{0}, "00"}, {"hex", Binary{10}, "0a"}, {"multiple", Binary{1, 254}, "01fe"}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { actual, err := st.input.MarshalText() require.NoError(t, err) require.True(t, utf8.Valid(actual)) require.Equal(t, st.output, string(actual)) }) } } func TestBinary_UnmarshalText(t *testing.T) { subtests := []struct { name string input string output Binary error bool }{ {"empty", "", Binary{}, false}, {"invalid_length", "0", Binary{}, true}, {"invalid_char", "0g", Binary{}, true}, {"hex", "0a", Binary{10}, false}, {"multiple", "01fe", Binary{1, 254}, false}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { var actual Binary if err := actual.UnmarshalText([]byte(st.input)); st.error { require.Error(t, err) } else { require.NoError(t, err) require.Equal(t, st.output, actual) } }) } } func TestBinary_MarshalJSON(t *testing.T) { subtests := []struct { name string input Binary output string }{ {"nil", nil, `null`}, {"empty", make(Binary, 0, 1), `null`}, {"space", Binary(" "), `"20"`}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { actual, err := st.input.MarshalJSON() require.NoError(t, err) require.True(t, utf8.Valid(actual)) require.Equal(t, st.output, string(actual)) }) } } func TestBinary_UnmarshalJSON(t *testing.T) { subtests := []struct { name string input string output Binary error bool }{ {"null", `null`, nil, false}, {"bool", `false`, nil, true}, {"number", `10`, nil, true}, {"invalid_length", `"0"`, nil, true}, {"invalid_char", `"0g"`, nil, true}, {"empty", `""`, make(Binary, 0, 1), false}, {"nul", `"00"`, Binary{0}, false}, {"hex", `"0a"`, Binary{10}, false}, {"multiple", `"01fe"`, Binary{1, 254}, false}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { var actual Binary if err := actual.UnmarshalJSON([]byte(st.input)); st.error { require.Error(t, err) } else { require.NoError(t, err) require.Equal(t, st.output, actual) } }) } } func TestBinary_Scan(t *testing.T) { subtests := []struct { name string input any output Binary error bool }{ {"nil", nil, nil, false}, {"bool", false, nil, true}, {"number", 10, nil, true}, {"string", "10", nil, true}, {"empty", make([]byte, 0, 1), nil, false}, {"nul", []byte{0}, Binary{0}, false}, {"hex", []byte{10}, Binary{10}, false}, {"multiple", []byte{1, 254}, Binary{1, 254}, false}, } for _, st := range subtests { 
t.Run(st.name, func(t *testing.T) { var actual Binary if err := actual.Scan(st.input); st.error { require.Error(t, err) } else { require.NoError(t, err) require.Equal(t, st.output, actual) } }) } } func TestBinary_Value(t *testing.T) { subtests := []struct { name string input Binary output any }{ {"empty", make(Binary, 0, 1), nil}, {"nul", Binary{0}, []byte{0}}, {"hex", Binary{10}, []byte{10}}, {"multiple", Binary{1, 254}, []byte{1, 254}}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { actual, err := st.input.Value() require.NoError(t, err) require.Equal(t, st.output, actual) }) } } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/types/int.go0000644000000000000000000000342315024302466025476 0ustar rootrootpackage types import ( "bytes" "database/sql" "database/sql/driver" "encoding" "encoding/json" "strconv" ) // Int adds JSON support to sql.NullInt64. type Int struct { sql.NullInt64 } // TransformZeroIntToNull transforms a valid Int carrying a zero value to a SQL NULL. func TransformZeroIntToNull(i *Int) { if i.Valid && i.Int64 == 0 { i.Valid = false } } // MakeInt constructs a new Int. // // Multiple transformer functions can be given, each transforming the generated Int, e.g., TransformZeroIntToNull. func MakeInt(in int64, transformers ...func(*Int)) Int { i := Int{sql.NullInt64{ Int64: in, Valid: true, }} for _, transformer := range transformers { transformer(&i) } return i } // MarshalJSON implements the json.Marshaler interface. // Supports JSON null. func (i Int) MarshalJSON() ([]byte, error) { var v interface{} if i.Valid { v = i.Int64 } return MarshalJSON(v) } // UnmarshalText implements the encoding.TextUnmarshaler interface. func (i *Int) UnmarshalText(text []byte) error { parsed, err := strconv.ParseInt(string(text), 10, 64) if err != nil { return CantParseInt64(err, string(text)) } *i = Int{sql.NullInt64{ Int64: parsed, Valid: true, }} return nil } // UnmarshalJSON implements the json.Unmarshaler interface. // Supports JSON null. func (i *Int) UnmarshalJSON(data []byte) error { // Ignore null, like in the main JSON package. if bytes.HasPrefix(data, []byte{'n'}) { return nil } if err := UnmarshalJSON(data, &i.Int64); err != nil { return err } i.Valid = true return nil } // Assert interface compliance. var ( _ json.Marshaler = Int{} _ json.Unmarshaler = (*Int)(nil) _ encoding.TextUnmarshaler = (*Int)(nil) _ driver.Valuer = Int{} _ sql.Scanner = (*Int)(nil) ) dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/config/0000755000000000000000000000000015024302466024454 5ustar rootrootdependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/config/tls.go0000644000000000000000000000743215024302466025613 0ustar rootrootpackage config import ( "crypto/tls" "crypto/x509" "encoding/pem" "github.com/pkg/errors" "os" ) // TLS represents configuration for a TLS client. // It provides options to enable TLS, specify certificate and key files, // CA certificate, and whether to skip verification of the server's certificate chain and host name. // Use the [TLS.MakeConfig] method to assemble a [*tls.Config] from the TLS struct. // // Example usage: // // func main() { // tlsConfig := &config.TLS{ // Enable: true, // Cert: "path/to/cert.pem", // Key: "path/to/key.pem", // Ca: "path/to/ca.pem", // Insecure: false, // } // // cfg, err := tlsConfig.MakeConfig("example.com") // if err != nil { // log.Fatalf("error creating TLS config: %v", err) // } // // // ... // } type TLS struct { // Enable indicates whether TLS is enabled. 
Enable bool `yaml:"tls" env:"TLS"` // Cert is either the path to the TLS certificate file or a raw PEM-encoded string representing it. // If provided, Key must also be specified. Cert string `yaml:"cert" env:"CERT"` // Key is either the path to the TLS key file or a raw PEM-encoded string representing it. // If specified, Cert must also be provided. Key string `yaml:"key" env:"KEY,unset"` // Ca is either the path to the CA certificate file or a raw PEM-encoded string representing it. Ca string `yaml:"ca" env:"CA"` // Insecure indicates whether to skip verification of the server's certificate chain and host name. // If true, any certificate presented by the server and any host name in that certificate is accepted. // In this mode, TLS is susceptible to machine-in-the-middle attacks unless custom verification is used. Insecure bool `yaml:"insecure" env:"INSECURE"` } // loadPemOrFile either returns a PEM from within the string or treats it as a file, returning its content. func loadPemOrFile(pemOrFile string) ([]byte, error) { block, _ := pem.Decode([]byte(pemOrFile)) if block != nil { return []byte(pemOrFile), nil } data, err := os.ReadFile(pemOrFile) // #nosec G304 -- inclusion of user-specified file if err != nil { return nil, err } return data, nil } // MakeConfig assembles a [*tls.Config] from the TLS struct and the provided serverName. // It returns a configured *tls.Config or an error if there are issues with the provided TLS settings. // If TLS is not enabled (t.Enable is false), it returns nil without an error. func (t *TLS) MakeConfig(serverName string) (*tls.Config, error) { if !t.Enable { return nil, nil } tlsConfig := &tls.Config{MinVersion: tls.VersionTLS12} hasKeyWithoutCert := t.Key != "" && t.Cert == "" hasCertWithoutKey := t.Cert != "" && t.Key == "" hasClientCert := t.Cert != "" && t.Key != "" if hasKeyWithoutCert { return nil, errors.New("private key given, but client certificate missing") } if hasCertWithoutKey { return nil, errors.New("client certificate given, but private key missing") } if hasClientCert { certPem, err := loadPemOrFile(t.Cert) if err != nil { return nil, errors.Wrap(err, "can't load X.509 client certificate") } keyPem, err := loadPemOrFile(t.Key) if err != nil { return nil, errors.Wrap(err, "can't load X.509 private key") } crt, err := tls.X509KeyPair(certPem, keyPem) if err != nil { return nil, errors.Wrap(err, "can't parse client certificate and private key into an X.509 key pair") } tlsConfig.Certificates = []tls.Certificate{crt} } if t.Insecure { tlsConfig.InsecureSkipVerify = true } else if t.Ca != "" { caPem, err := loadPemOrFile(t.Ca) if err != nil { return nil, errors.Wrap(err, "can't load X.509 CA certificate") } tlsConfig.RootCAs = x509.NewCertPool() if !tlsConfig.RootCAs.AppendCertsFromPEM(caPem) { return nil, errors.New("can't parse CA file") } } tlsConfig.ServerName = serverName return tlsConfig, nil } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/config/tls_test.go0000644000000000000000000002420615024302466026650 0ustar rootrootpackage config import ( "crypto" "crypto/ecdsa" "crypto/elliptic" "crypto/rand" "crypto/x509" "crypto/x509/pkix" "encoding/pem" "github.com/pkg/errors" "github.com/stretchr/testify/require" "math/big" "os" "testing" "time" ) func Test_loadPemOrFile(t *testing.T) { cert, _, err := generateCert("cert", generateCertOptions{}) require.NoError(t, err) certPem := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}) certFile, err := os.CreateTemp("", "cert-*.pem") require.NoError(t, err) 
defer func(name string) { _ = os.Remove(name) }(certFile.Name()) _, err = certFile.Write(certPem) require.NoError(t, err) t.Run("Load raw PEM", func(t *testing.T) { out, err := loadPemOrFile(string(certPem)) require.NoError(t, err) require.Equal(t, certPem, out) }) t.Run("Load file", func(t *testing.T) { out, err := loadPemOrFile(certFile.Name()) require.NoError(t, err) require.Equal(t, certPem, out) }) t.Run("Invalid file", func(t *testing.T) { _, err := loadPemOrFile("/dev/null/nonexistent") require.Error(t, err) }) } func TestTLS_MakeConfig(t *testing.T) { t.Run("TLS disabled", func(t *testing.T) { tlsConfig := &TLS{Enable: false} config, err := tlsConfig.MakeConfig("icinga.com") require.NoError(t, err) require.Nil(t, config) }) t.Run("Server name", func(t *testing.T) { tlsConfig := &TLS{Enable: true} config, err := tlsConfig.MakeConfig("icinga.com") require.NoError(t, err) require.NotNil(t, config) require.Equal(t, "icinga.com", config.ServerName) }) t.Run("Empty server name", func(t *testing.T) { t.Skip("TODO: Either ServerName or InsecureSkipVerify must be specified in the tls.Config and" + " should be verified in MakeConfig.") }) t.Run("Insecure skip verify", func(t *testing.T) { tlsConfig := &TLS{Enable: true, Insecure: true} config, err := tlsConfig.MakeConfig("icinga.com") require.NoError(t, err) require.NotNil(t, config) require.True(t, config.InsecureSkipVerify) }) t.Run("Missing client certificate", func(t *testing.T) { tlsConfig := &TLS{Enable: true, Key: "test.key"} _, err := tlsConfig.MakeConfig("icinga.com") require.ErrorContains(t, err, "client certificate missing") }) t.Run("Missing private key", func(t *testing.T) { tlsConfig := &TLS{Enable: true, Cert: "test.crt"} _, err := tlsConfig.MakeConfig("icinga.com") require.ErrorContains(t, err, "private key missing") }) t.Run("x509", func(t *testing.T) { cert, key, err := generateCert("cert", generateCertOptions{}) require.NoError(t, err) certFile, err := os.CreateTemp("", "cert-*.pem") require.NoError(t, err) defer func(name string) { _ = os.Remove(name) }(certFile.Name()) err = pem.Encode(certFile, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}) require.NoError(t, err) keyFile, err := os.CreateTemp("", "key-*.pem") require.NoError(t, err) defer func(name string) { _ = os.Remove(name) }(keyFile.Name()) keyBytes, err := x509.MarshalPKCS8PrivateKey(key) require.NoError(t, err) err = pem.Encode(keyFile, &pem.Block{Type: "PRIVATE KEY", Bytes: keyBytes}) require.NoError(t, err) ca, _, err := generateCert("ca", generateCertOptions{ca: true}) require.NoError(t, err) caFile, err := os.CreateTemp("", "ca-*.pem") require.NoError(t, err) defer func(name string) { _ = os.Remove(name) }(caFile.Name()) err = pem.Encode(caFile, &pem.Block{Type: "CERTIFICATE", Bytes: ca.Raw}) require.NoError(t, err) corruptFile, err := os.CreateTemp("", "corrupt-*.pem") require.NoError(t, err) defer func(name string) { _ = os.Remove(name) }(corruptFile.Name()) err = os.WriteFile(corruptFile.Name(), []byte("-----BEGIN CORRUPT-----\nOOPS\n-----END CORRUPT-----"), 0600) require.NoError(t, err) t.Run("Valid certificate and key", func(t *testing.T) { tlsConfig := &TLS{Enable: true, Cert: certFile.Name(), Key: keyFile.Name()} config, err := tlsConfig.MakeConfig("icinga.com") require.NoError(t, err) require.NotNil(t, config) require.Len(t, config.Certificates, 1) }) t.Run("Valid certificate and key as PEM", func(t *testing.T) { certRaw, err := os.ReadFile(certFile.Name()) require.NoError(t, err) keyRaw, err := os.ReadFile(keyFile.Name()) require.NoError(t, err) 
tlsConfig := &TLS{Enable: true, Cert: string(certRaw), Key: string(keyRaw)} config, err := tlsConfig.MakeConfig("icinga.com") require.NoError(t, err) require.NotNil(t, config) require.Len(t, config.Certificates, 1) }) t.Run("Valid certificate and key, mixed file and PEM", func(t *testing.T) { keyRaw, err := os.ReadFile(keyFile.Name()) require.NoError(t, err) tlsConfig := &TLS{Enable: true, Cert: certFile.Name(), Key: string(keyRaw)} config, err := tlsConfig.MakeConfig("icinga.com") require.NoError(t, err) require.NotNil(t, config) require.Len(t, config.Certificates, 1) }) t.Run("Mismatched certificate and key", func(t *testing.T) { _key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) require.NoError(t, err) _keyFile, err := os.CreateTemp("", "key-*.pem") require.NoError(t, err) defer func(name string) { _ = os.Remove(name) }(_keyFile.Name()) _keyBytes, err := x509.MarshalPKCS8PrivateKey(_key) require.NoError(t, err) err = pem.Encode(_keyFile, &pem.Block{Type: "PRIVATE KEY", Bytes: _keyBytes}) require.NoError(t, err) tlsConfig := &TLS{Enable: true, Cert: certFile.Name(), Key: _keyFile.Name()} _, err = tlsConfig.MakeConfig("icinga.com") require.Error(t, err) }) t.Run("Invalid certificate path", func(t *testing.T) { tlsConfig := &TLS{Enable: true, Cert: "nonexistent.crt", Key: keyFile.Name()} _, err := tlsConfig.MakeConfig("icinga.com") require.Error(t, err) }) t.Run("Invalid certificate permissions", func(t *testing.T) { fileInfo, err := certFile.Stat() require.NoError(t, err) defer func() { err := certFile.Chmod(fileInfo.Mode()) require.NoError(t, err) }() err = certFile.Chmod(0000) require.NoError(t, err) tlsConfig := &TLS{Enable: true, Cert: certFile.Name(), Key: keyFile.Name()} _, err = tlsConfig.MakeConfig("icinga.com") require.Error(t, err) }) t.Run("Corrupt certificate", func(t *testing.T) { tlsConfig := &TLS{Enable: true, Cert: corruptFile.Name(), Key: keyFile.Name()} _, err := tlsConfig.MakeConfig("icinga.com") require.Error(t, err) }) t.Run("Corrupt certificate as PEM", func(t *testing.T) { corruptRaw, err := os.ReadFile(corruptFile.Name()) require.NoError(t, err) keyRaw, err := os.ReadFile(keyFile.Name()) require.NoError(t, err) tlsConfig := &TLS{Enable: true, Cert: string(corruptRaw), Key: string(keyRaw)} _, err = tlsConfig.MakeConfig("icinga.com") require.Error(t, err) }) t.Run("Invalid key path", func(t *testing.T) { tlsConfig := &TLS{Enable: true, Cert: certFile.Name(), Key: "nonexistent.key"} _, err := tlsConfig.MakeConfig("icinga.com") require.Error(t, err) }) t.Run("Invalid key permissions", func(t *testing.T) { fileInfo, err := keyFile.Stat() require.NoError(t, err) defer func() { err := keyFile.Chmod(fileInfo.Mode()) require.NoError(t, err) }() err = keyFile.Chmod(0000) require.NoError(t, err) tlsConfig := &TLS{Enable: true, Cert: certFile.Name(), Key: keyFile.Name()} _, err = tlsConfig.MakeConfig("icinga.com") require.Error(t, err) }) t.Run("Corrupt key", func(t *testing.T) { tlsConfig := &TLS{Enable: true, Cert: certFile.Name(), Key: corruptFile.Name()} _, err := tlsConfig.MakeConfig("icinga.com") require.Error(t, err) }) t.Run("Valid CA", func(t *testing.T) { tlsConfig := &TLS{Enable: true, Ca: caFile.Name()} config, err := tlsConfig.MakeConfig("icinga.com") require.NoError(t, err) require.NotNil(t, config) require.NotNil(t, config.RootCAs) }) t.Run("Valid CA as PEM", func(t *testing.T) { caRaw, err := os.ReadFile(caFile.Name()) require.NoError(t, err) tlsConfig := &TLS{Enable: true, Ca: string(caRaw)} config, err := tlsConfig.MakeConfig("icinga.com") 
require.NoError(t, err) require.NotNil(t, config) require.NotNil(t, config.RootCAs) }) t.Run("Invalid CA path", func(t *testing.T) { tlsConfig := &TLS{Enable: true, Ca: "nonexistent.ca"} _, err := tlsConfig.MakeConfig("icinga.com") require.Error(t, err) }) t.Run("Invalid CA permissions", func(t *testing.T) { fileInfo, err := caFile.Stat() require.NoError(t, err) defer func() { err := caFile.Chmod(fileInfo.Mode()) require.NoError(t, err) }() err = caFile.Chmod(0000) require.NoError(t, err) tlsConfig := &TLS{Enable: true, Ca: caFile.Name()} _, err = tlsConfig.MakeConfig("icinga.com") require.Error(t, err) }) t.Run("Corrupt CA", func(t *testing.T) { tlsConfig := &TLS{Enable: true, Ca: corruptFile.Name()} _, err := tlsConfig.MakeConfig("icinga.com") require.Error(t, err) }) }) } type generateCertOptions struct { ca bool issuer *x509.Certificate issuerKey crypto.PrivateKey } func generateCert(cn string, options generateCertOptions) (*x509.Certificate, crypto.PrivateKey, error) { privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) if err != nil { return nil, nil, err } serialNumber, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128)) if err != nil { return nil, nil, err } template := &x509.Certificate{ SerialNumber: serialNumber, Subject: pkix.Name{CommonName: cn}, NotBefore: time.Now().Add(-1 * time.Hour), NotAfter: time.Now().Add(24 * time.Hour), KeyUsage: x509.KeyUsageCertSign, BasicConstraintsValid: true, IsCA: options.ca, } var issuer *x509.Certificate var issuerKey crypto.PrivateKey if options.issuer != nil { if options.issuerKey == nil { return nil, nil, errors.New("issuerKey required if issuer set") } issuer = options.issuer issuerKey = options.issuerKey } else { issuer = template issuerKey = privateKey } der, err := x509.CreateCertificate(rand.Reader, template, issuer, privateKey.Public(), issuerKey) if err != nil { return nil, nil, err } cert, err := x509.ParseCertificate(der) if err != nil { return nil, nil, err } return cert, privateKey, nil } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/config/config.go0000644000000000000000000002665015024302466026261 0ustar rootroot// Package config provides utilities for configuration parsing and loading. // It includes functionality for handling command-line flags and // loading configuration from YAML files and environment variables, // with additional support for setting default values and validation. // Additionally, it provides a struct that defines common settings for a TLS client. package config import ( stderrors "errors" "fmt" "github.com/caarlos0/env/v11" "github.com/creasty/defaults" "github.com/goccy/go-yaml" "github.com/jessevdk/go-flags" "github.com/pkg/errors" "io/fs" "os" "reflect" ) // ErrInvalidArgument is the error returned by any function that loads configuration if // the parsing result cannot be stored in the value pointed to by the specified argument, // which must be a non-nil struct pointer. var ErrInvalidArgument = stderrors.New("invalid argument") // ErrInvalidConfiguration is attached to errors returned by any function that loads configuration when // the configuration is invalid, // i.e. if the Validate method of the provided [Validator] interface returns an error, // which is then propagated by these functions. // Note that for such errors, errors.Is() will recognize both ErrInvalidConfiguration and // the original errors returned from Validate. 
var ErrInvalidConfiguration = stderrors.New("invalid configuration") // FromYAMLFile parses the given YAML file and stores the result // in the value pointed to by v. If v is nil or not a struct pointer, // FromYAMLFile returns an [ErrInvalidArgument] error. // // It is possible to define default values via the struct tag `default`. // // The function also validates the configuration using the Validate method // of the provided [Validator] interface. // Any error returned from Validate is propagated with [ErrInvalidConfiguration] attached, // allowing errors.Is() checks on the returned errors to recognize both ErrInvalidConfiguration and // the original errors returned from Validate. // // Example usage: // // type Config struct { // ServerAddress string `yaml:"server_address" default:"localhost:8080"` // TLS config.TLS `yaml:",inline"` // } // // func (c *Config) Validate() error { // if _, _, err := net.SplitHostPort(c.ServerAddress); err != nil { // return errors.Wrapf(err, "invalid server address: %s", c.ServerAddress) // } // // if err := c.TLS.Validate(); err != nil { // return errors.WithStack(err) // } // // return nil // } // // func main() { // var cfg Config // if err := config.FromYAMLFile("config.yml", &cfg); err != nil { // log.Fatalf("error loading config: %v", err) // } // // tlsCfg, err := cfg.TLS.MakeConfig("icinga.com") // if err != nil { // log.Fatalf("error creating TLS config: %v", err) // } // // // ... // } func FromYAMLFile(name string, v Validator) error { if err := validateNonNilStructPointer(v); err != nil { return errors.WithStack(err) } // #nosec G304 -- Accept user-controlled input for config file. f, err := os.Open(name) if err != nil { return errors.Wrap(err, "can't open YAML file "+name) } defer func(f *os.File) { _ = f.Close() }(f) if err := defaults.Set(v); err != nil { return errors.Wrap(err, "can't set config defaults") } d := yaml.NewDecoder(f, yaml.DisallowUnknownField()) if err := d.Decode(v); err != nil { return errors.Wrap(err, "can't parse YAML file "+name) } if err := v.Validate(); err != nil { return fmt.Errorf("%w: %w", ErrInvalidConfiguration, errors.WithStack(err)) } return nil } // EnvOptions is a type alias for [env.Options], so that only this package needs to import [env]. type EnvOptions = env.Options // FromEnv parses environment variables and stores the result in the value pointed to by v. // If v is nil or not a struct pointer, FromEnv returns an [ErrInvalidArgument] error. // // It is possible to define default values via the struct tag `default`. // // The function also validates the configuration using the Validate method // of the provided [Validator] interface. // Any error returned from Validate is propagated with [ErrInvalidConfiguration] attached, // allowing errors.Is() checks on the returned errors to recognize both ErrInvalidConfiguration and // the original errors returned from Validate. 
// // Example usage: // // type Config struct { // ServerAddress string `env:"SERVER_ADDRESS" default:"localhost:8080"` // TLS config.TLS // } // // func (c *Config) Validate() error { // if _, _, err := net.SplitHostPort(c.ServerAddress); err != nil { // return errors.Wrapf(err, "invalid server address: %s", c.ServerAddress) // } // // if err := c.TLS.Validate(); err != nil { // return errors.WithStack(err) // } // // return nil // } // // func main() { // var cfg Config // if err := config.FromEnv(cfg, config.EnvOptions{}); err != nil { // log.Fatalf("error loading config: %v", err) // } // // tlsCfg, err := cfg.TLS.MakeConfig("icinga.com") // if err != nil { // log.Fatalf("error creating TLS config: %v", err) // } // // // ... // } func FromEnv(v Validator, options EnvOptions) error { if err := validateNonNilStructPointer(v); err != nil { return errors.WithStack(err) } if err := defaults.Set(v); err != nil { return errors.Wrap(err, "can't set config defaults") } if err := env.ParseWithOptions(v, options); err != nil { return errors.Wrap(err, "can't parse environment variables") } if err := v.Validate(); err != nil { return fmt.Errorf("%w: %w", ErrInvalidConfiguration, errors.WithStack(err)) } return nil } // LoadOptions contains options for loading configuration from both files and environment variables. type LoadOptions struct { // Flags provides access to specific command line flag values. Flags Flags // EnvOptions contains options for loading configuration from environment variables. EnvOptions EnvOptions } // Load loads configuration from both YAML files and environment variables and // stores the result in the value pointed to by v. // If v is nil or not a struct pointer, // Load returns an [ErrInvalidArgument] error. // // It is possible to define default values via the struct tag `default`. // // The function also validates the configuration using the Validate method // of the provided [Validator] interface. // Any error returned from Validate is propagated with [ErrInvalidConfiguration] attached, // allowing errors.Is() checks on the returned errors to recognize both ErrInvalidConfiguration and // the original errors returned from Validate. // // This function handles configuration loading in three scenarios: // // 1. Load configuration exclusively from YAML files when no applicable environment variables are set. // 2. Combine YAML file and environment variable configurations, allowing environment variables to // supplement or override possible incomplete YAML data. // 3. Load entirely from environment variables if the default YAML config file is missing and // no specific config path is provided. 
// // Example usage: // // const DefaultConfigPath = "/path/to/config.yml" // // type Flags struct { // Config string `short:"c" long:"config" description:"Path to config file"` // } // // func (f Flags) GetConfigPath() string { // if f.Config == "" { // return DefaultConfigPath // } // // return f.Config // } // // func (f Flags) IsExplicitConfigPath() bool { // return f.Config != "" // } // // type Config struct { // ServerAddress string `yaml:"server_address" env:"SERVER_ADDRESS" default:"localhost:8080"` // TLS config.TLS `yaml:",inline"` // } // // func (c *Config) Validate() error { // if _, _, err := net.SplitHostPort(c.ServerAddress); err != nil { // return errors.Wrapf(err, "invalid server address: %s", c.ServerAddress) // } // // if err := c.TLS.Validate(); err != nil { // return errors.WithStack(err) // } // // return nil // } // // func main() { // var flags Flags // if err := config.ParseFlags(&flags); err != nil { // log.Fatalf("error parsing flags: %v", err) // } // // var cfg Config // if err := config.Load(&cfg, config.LoadOptions{Flags: flags, EnvOptions: config.EnvOptions{}}); err != nil { // log.Fatalf("error loading config: %v", err) // } // // tlsCfg, err := cfg.TLS.MakeConfig("icinga.com") // if err != nil { // log.Fatalf("error creating TLS config: %v", err) // } // // // ... // } func Load(v Validator, options LoadOptions) error { if err := validateNonNilStructPointer(v); err != nil { return errors.WithStack(err) } var configFileIsDefaultAndDoesNotExist bool if err := FromYAMLFile(options.Flags.GetConfigPath(), v); err != nil { // Allow continuation with FromEnv by handling: // // - ErrInvalidConfiguration: // The configuration may be incomplete and will be revalidated in FromEnv. // // - Non-existent file errors: // If no explicit config path is set, fallback to environment variables is allowed. configIsInvalid := errors.Is(err, ErrInvalidConfiguration) configFileIsDefaultAndDoesNotExist = errors.Is(err, fs.ErrNotExist) && !options.Flags.IsExplicitConfigPath() if !(configIsInvalid || configFileIsDefaultAndDoesNotExist) { return errors.WithStack(err) } } // Call FromEnv regardless of the outcome from FromYAMLFile. // If no environment variables are set, configuration relies entirely on YAML. // Otherwise, environment variables can supplement, override YAML settings, or serve as the sole source. // FromEnv also includes validation, ensuring completeness after considering both sources. if err := FromEnv(v, options.EnvOptions); err != nil { if configFileIsDefaultAndDoesNotExist { return stderrors.Join( errors.WithStack(err), fmt.Errorf( "default config file %s does not exist but can be ignored if"+ " the configuration is intended to be entirely provided via environment variables", options.Flags.GetConfigPath(), ), ) } return errors.WithStack(err) } return nil } // ParseFlags parses CLI flags and stores the result // in the value pointed to by v. If v is nil or not a struct pointer, // ParseFlags returns an [ErrInvalidArgument] error. // // ParseFlags adds a default Help Options group, // which contains the options -h and --help. // If either option is specified on the command line, // ParseFlags prints the help message to [os.Stdout] and exits. // // Note that errors are not printed automatically, // so error handling is the sole responsibility of the caller. 
// // Example usage: // // type Flags struct { // Config string `short:"c" long:"config" description:"Path to config file" required:"true"` // } // // func main() { // var flags Flags // if err := config.ParseFlags(&flags); err != nil { // log.Fatalf("error parsing flags: %v", err) // } // // // ... // } func ParseFlags(v any) error { if err := validateNonNilStructPointer(v); err != nil { return errors.WithStack(err) } parser := flags.NewParser(v, flags.Default^flags.PrintErrors) if _, err := parser.Parse(); err != nil { var flagErr *flags.Error if errors.As(err, &flagErr) && errors.Is(flagErr.Type, flags.ErrHelp) { _, _ = fmt.Fprintln(os.Stdout, flagErr) os.Exit(0) } return errors.Wrap(err, "can't parse CLI flags") } return nil } // validateNonNilStructPointer checks if the provided value is a non-nil pointer to a struct. // It returns an error if the value is not a pointer, is nil, or does not point to a struct. func validateNonNilStructPointer(v any) error { rv := reflect.ValueOf(v) if rv.Kind() != reflect.Pointer || rv.IsNil() || rv.Elem().Kind() != reflect.Struct { return errors.Wrapf(ErrInvalidArgument, "non-nil struct pointer expected, got %T", v) } return nil } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/config/config_test.go0000644000000000000000000003510715024302466027315 0ustar rootrootpackage config import ( "encoding/json" "fmt" "github.com/icinga/icinga-go-library/testutils" "github.com/pkg/errors" "github.com/stretchr/testify/require" "io/fs" "os" "os/exec" "reflect" "testing" ) // errInvalidConfiguration is an error that indicates invalid configuration. var errInvalidConfiguration = errors.New("invalid configuration") // validateValid is a struct used to represent a valid configuration. type validateValid struct{} // Validate returns nil indicating the configuration is valid. func (*validateValid) Validate() error { return nil } // validateInvalid is a struct used to represent an invalid configuration. type validateInvalid struct{} // Validate returns errInvalidConfiguration indicating the configuration is invalid. func (*validateInvalid) Validate() error { return errInvalidConfiguration } // simpleConfig is an always valid test configuration struct with only one key. type simpleConfig struct { Key string `yaml:"key" env:"KEY"` validateValid } // inlinedConfigPart is a part of a test configuration that will be inlined. type inlinedConfigPart struct { Key string `yaml:"inlined-key" env:"INLINED_KEY"` } // inlinedConfig is an always valid test configuration struct with a key and an inlined part from inlinedConfigPart. type inlinedConfig struct { Key string `yaml:"key" env:"KEY"` Inlined inlinedConfigPart `yaml:",inline"` validateValid } // embeddedConfigPart is a part of a test configuration that will be embedded. type embeddedConfigPart struct { Key string `yaml:"embedded-key" env:"EMBEDDED_KEY"` } // embeddedConfig is an always valid test configuration struct with a key and an embedded part from embeddedConfigPart. type embeddedConfig struct { Key string `yaml:"key" env:"KEY"` Embedded embeddedConfigPart `yaml:"embedded" envPrefix:"EMBEDDED_"` validateValid } // defaultConfigPart is a part of a test configuration that defines a default value. type defaultConfigPart struct { Key string `yaml:"default-key" env:"DEFAULT_KEY" default:"default-value"` } // defaultConfig is an always valid test configuration struct with a key and // an inlined part with defaults from defaultConfigPart. 
type defaultConfig struct { Key string `yaml:"key" env:"KEY"` Default defaultConfigPart `yaml:",inline"` validateValid } // invalidConfig is an always invalid test configuration struct with only one key. type invalidConfig struct { Key string `yaml:"key" env:"KEY"` validateInvalid } // configWithInvalidDefault is a test configuration struct used to verify error propagation from defaults.Set(). // It intentionally defines an invalid default value for a map, // which the defaults package parses using json.Unmarshal(). // The test then asserts that a json.SyntaxError is returned. // This approach is necessary because the defaults package does not return errors for parsing scalar types, // which was quite unexpected when writing the test. type configWithInvalidDefault struct { Key string `yaml:"key" env:"KEY"` InvalidDefaultJson map[any]any `yaml:"invalid" envPrefix:"INVALID_" default:"a"` validateValid } // nonStructValidator is a non-struct type that implements the Validator interface but // cannot be used in FromEnv and FromYAMLFile to parse configuration into. type nonStructValidator int func (nonStructValidator) Validate() error { return nil } // configTests specifies common test cases for the FromEnv and FromYAMLFile functions. var configTests = []testutils.TestCase[Validator, testutils.ConfigTestData]{ { Name: "Simple Config", Data: testutils.ConfigTestData{ Yaml: `key: value`, Env: map[string]string{"KEY": "value"}, }, Expected: &simpleConfig{ Key: "value", }, }, { Name: "Inlined Config", Data: testutils.ConfigTestData{ Yaml: ` key: value inlined-key: inlined-value`, Env: map[string]string{ "KEY": "value", "INLINED_KEY": "inlined-value", }}, Expected: &inlinedConfig{ Key: "value", Inlined: inlinedConfigPart{Key: "inlined-value"}, }, }, { Name: "Embedded Config", Data: testutils.ConfigTestData{ Yaml: ` key: value embedded: embedded-key: embedded-value`, Env: map[string]string{ "KEY": "value", "EMBEDDED_EMBEDDED_KEY": "embedded-value", }}, Expected: &embeddedConfig{ Key: "value", Embedded: embeddedConfigPart{Key: "embedded-value"}, }, }, { Name: "Defaults", Data: testutils.ConfigTestData{ Yaml: `key: value`, Env: map[string]string{"KEY": "value"}}, Expected: &defaultConfig{ Key: "value", Default: defaultConfigPart{Key: "default-value"}, }, }, { Name: "Overriding Defaults", Data: testutils.ConfigTestData{ Yaml: ` key: value default-key: overridden-value`, Env: map[string]string{ "KEY": "value", "DEFAULT_KEY": "overridden-value", }}, Expected: &defaultConfig{ Key: "value", Default: defaultConfigPart{Key: "overridden-value"}, }, }, { Name: "Validate invalid", Data: testutils.ConfigTestData{ Yaml: `key: value`, Env: map[string]string{"KEY": "value"}, }, Expected: &invalidConfig{ Key: "value", }, Error: testutils.ErrorIs(errInvalidConfiguration), }, { Name: "Error propagation from defaults.Set()", Data: testutils.ConfigTestData{ Yaml: `key: value`, Env: map[string]string{"KEY": "value"}, }, Expected: &configWithInvalidDefault{}, Error: testutils.ErrorAs[*json.SyntaxError](), }, } func TestFromEnv(t *testing.T) { for _, tc := range configTests { t.Run(tc.Name, tc.F(func(data testutils.ConfigTestData) (Validator, error) { actual := createValidatorInstance(tc.Expected) err := FromEnv(actual, EnvOptions{Environment: data.Env}) return actual, err })) } t.Run("Nil pointer argument", func(t *testing.T) { var config *struct{ Validator } err := FromEnv(config, EnvOptions{}) require.ErrorIs(t, err, ErrInvalidArgument) }) t.Run("Nil argument", func(t *testing.T) { err := FromEnv(nil, EnvOptions{}) 
require.ErrorIs(t, err, ErrInvalidArgument) }) t.Run("Non-struct pointer argument", func(t *testing.T) { var config nonStructValidator err := FromEnv(&config, EnvOptions{}) require.ErrorIs(t, err, ErrInvalidArgument) }) } func TestFromYAMLFile(t *testing.T) { for _, tc := range configTests { t.Run(tc.Name, tc.F(func(data testutils.ConfigTestData) (Validator, error) { actual := createValidatorInstance(tc.Expected) var err error testutils.WithYAMLFile(t, data.Yaml, func(file *os.File) { err = FromYAMLFile(file.Name(), actual) }) return actual, err })) } type invalidYamlTestCase struct { // Test case name. name string // Content of the YAML file. content string } invalidYamlTests := []invalidYamlTestCase{ { name: "Empty YAML", content: "", }, { name: "Empty YAML with directive separator", content: `---`, }, { name: "Faulty YAML", content: `:\n`, }, { name: "Key only", content: `key`, }, } for _, tc := range invalidYamlTests { t.Run(tc.name, func(t *testing.T) { testutils.WithYAMLFile(t, tc.content, func(file *os.File) { err := FromYAMLFile(file.Name(), &validateValid{}) require.Error(t, err) // Since the YAML library does not export all possible error types, // we must ensure that the error returned is not one of our own errors. require.NotErrorIs(t, err, ErrInvalidArgument) require.NotErrorIs(t, err, errInvalidConfiguration) }) }) } t.Run("Nil pointer argument", func(t *testing.T) { var config *struct{ Validator } err := FromYAMLFile("", config) require.ErrorIs(t, err, ErrInvalidArgument) }) t.Run("Nil argument", func(t *testing.T) { err := FromYAMLFile("", nil) require.ErrorIs(t, err, ErrInvalidArgument) }) t.Run("Non-struct pointer argument", func(t *testing.T) { testutils.WithYAMLFile(t, `key: value`, func(file *os.File) { var config nonStructValidator err := FromYAMLFile(file.Name(), &config) require.ErrorIs(t, err, ErrInvalidArgument) }) }) t.Run("Non-existent file", func(t *testing.T) { err := FromYAMLFile("nonexistent.yaml", &validateValid{}) require.ErrorIs(t, err, fs.ErrNotExist) }) t.Run("Permission denied", func(t *testing.T) { var pathError *fs.PathError yamlFile, err := os.CreateTemp("", "*.yaml") require.NoError(t, err) require.NoError(t, yamlFile.Chmod(0000)) require.NoError(t, yamlFile.Close()) defer func(name string) { _ = os.Remove(name) }(yamlFile.Name()) err = FromYAMLFile(yamlFile.Name(), &validateValid{}) require.ErrorAs(t, err, &pathError) }) } // testFlags is a struct that implements the Flags interface. // It holds information about the configuration file path and whether it was explicitly set. type testFlags struct { configPath string // The path to the configuration file. explicitConfigPath bool // Indicates if the config path was explicitly set. } // GetConfigPath returns the path to the configuration file. func (f testFlags) GetConfigPath() string { return f.configPath } // IsExplicitConfigPath indicates whether the configuration file path was explicitly set. 
func (f testFlags) IsExplicitConfigPath() bool { return f.explicitConfigPath } func TestLoad(t *testing.T) { loadTests := []testutils.TestCase[Validator, testutils.ConfigTestData]{ { Name: "Load from YAML only", Data: testutils.ConfigTestData{ Yaml: `key: value`, }, Expected: &simpleConfig{ Key: "value", }, }, { Name: "Load from Env only", Data: testutils.ConfigTestData{ Env: map[string]string{"KEY": "value"}, }, Expected: &simpleConfig{ Key: "value", }, }, { Name: "YAML and Env; Env overrides", Data: testutils.ConfigTestData{ Yaml: `key: yaml-value`, Env: map[string]string{"KEY": "env-value"}, }, Expected: &simpleConfig{ Key: "env-value", }, }, { Name: "YAML and Env; Env overrides defaults", Data: testutils.ConfigTestData{ Yaml: `key: yaml-value`, Env: map[string]string{ "DEFAULT_KEY": "env-value", }}, Expected: &defaultConfig{ Key: "yaml-value", Default: defaultConfigPart{Key: "env-value"}, }, }, { Name: "YAML and Env; Env supplements", Data: testutils.ConfigTestData{ Yaml: `key: yaml-value`, Env: map[string]string{"EMBEDDED_EMBEDDED_KEY": "env-value"}}, Expected: &embeddedConfig{ Key: "yaml-value", Embedded: embeddedConfigPart{Key: "env-value"}, }, }, { Name: "Validate invalid", Data: testutils.ConfigTestData{ Yaml: `key: value`, Env: map[string]string{"KEY": "value"}, }, Expected: &invalidConfig{ Key: "value", }, Error: testutils.ErrorIs(errInvalidConfiguration), }, } for _, tc := range loadTests { t.Run(tc.Name, tc.F(func(data testutils.ConfigTestData) (Validator, error) { actual := createValidatorInstance(tc.Expected) var err error if data.Yaml != "" { testutils.WithYAMLFile(t, data.Yaml, func(file *os.File) { err = Load(actual, LoadOptions{ Flags: testFlags{ configPath: file.Name(), explicitConfigPath: true, }, EnvOptions: EnvOptions{Environment: data.Env}, }) }) } else { err = Load(actual, LoadOptions{Flags: testFlags{}, EnvOptions: EnvOptions{Environment: data.Env}}) } return actual, err })) } t.Run("Nil pointer argument", func(t *testing.T) { var config *struct{ Validator } err := Load(config, LoadOptions{}) require.ErrorIs(t, err, ErrInvalidArgument) }) t.Run("Nil argument", func(t *testing.T) { err := Load(nil, LoadOptions{}) require.ErrorIs(t, err, ErrInvalidArgument) }) t.Run("Non-struct pointer argument", func(t *testing.T) { var config nonStructValidator err := Load(config, LoadOptions{}) require.ErrorIs(t, err, ErrInvalidArgument) }) t.Run("Explicit config; file does not exist", func(t *testing.T) { err := Load(&validateValid{}, LoadOptions{Flags: testFlags{explicitConfigPath: true}}) require.ErrorIs(t, err, fs.ErrNotExist) }) } func TestParseFlags(t *testing.T) { t.Run("Simple flags", func(t *testing.T) { originalArgs := os.Args defer func() { os.Args = originalArgs }() os.Args = []string{"cmd", "--test-flag=value"} type Flags struct { TestFlag string `long:"test-flag"` } var flags Flags err := ParseFlags(&flags) require.NoError(t, err) require.Equal(t, "value", flags.TestFlag) }) t.Run("Nil pointer argument", func(t *testing.T) { var flags *any err := ParseFlags(flags) require.ErrorIs(t, err, ErrInvalidArgument) }) t.Run("Nil argument", func(t *testing.T) { err := ParseFlags(nil) require.ErrorIs(t, err, ErrInvalidArgument) }) t.Run("Non-struct pointer argument", func(t *testing.T) { var flags int err := ParseFlags(&flags) require.ErrorIs(t, err, ErrInvalidArgument) }) t.Run("Exit on help flag", func(t *testing.T) { // This test case checks the behavior of ParseFlags() when the help flag (e.g. -h) is provided. 
// Since ParseFlags() calls os.Exit() upon encountering the help flag, we need to run this // test in a separate subprocess to capture and verify the output without terminating the // main test process. if os.Getenv("TEST_HELP_FLAG") == "1" { // This block runs in the subprocess. type Flags struct{} var flags Flags originalArgs := os.Args defer func() { os.Args = originalArgs }() os.Args = []string{"cmd", "-h"} if err := ParseFlags(&flags); err != nil { panic(err) } return } // This block runs in the main test process. It starts this test again in a subprocess with the // TEST_HELP_FLAG=1 environment variable provided in order to run the above code block. // #nosec G204 -- The subprocess is launched with controlled input for testing purposes. // The command and arguments are derived from the test framework and are not influenced by external input. cmd := exec.Command(os.Args[0], fmt.Sprintf("-test.run=%s", t.Name())) cmd.Env = append(os.Environ(), "TEST_HELP_FLAG=1") out, err := cmd.CombinedOutput() require.NoError(t, err) // When the help flag is provided, ParseFlags() outputs usage information, // including "-h, --help Show this help message" (whitespace may vary). require.Contains(t, string(out), "-h, --help") }) } // createValidatorInstance creates a new instance of the same type as the provided value. // // Since our test cases only define the expected configuration, // we need to create a new instance of that type for our functions to parse the configuration into. func createValidatorInstance(v Validator) Validator { v, ok := reflect.New(reflect.TypeOf(v).Elem()).Interface().(Validator) if !ok { panic(fmt.Sprintf("cannot create a Validator, got %T", v)) } return v } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/config/contracts.go0000644000000000000000000000262115024302466027004 0ustar rootrootpackage config // Validator is an interface that must be implemented by any struct into which configuration is intended to be loaded. // // The Validate method checks the configuration values and // returns an error if any value is invalid or missing when required. // // For fields such as file paths, the responsibility of Validate is limited to // verifying the presence and format of the value, // not checking external conditions like file existence or readability. // This principle applies generally to any field where external validation // (e.g., network availability, resource accessibility) is beyond the scope of basic configuration validation. type Validator interface { // Validate checks the configuration values and // returns an error if any value is invalid or missing when required. Validate() error } // Flags is an interface that provides methods related to access the // configuration file path specified via command line flags. // This interface is meant to be implemented by flag structs containing // a switch for the configuration file path. type Flags interface { // GetConfigPath retrieves the path to the configuration file as specified by command line flags, // or returns a default path if none was provided. GetConfigPath() string // IsExplicitConfigPath indicates whether the configuration file path was // explicitly set through command line flags. 
IsExplicitConfigPath() bool } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/strcase/0000755000000000000000000000000015024302466024653 5ustar rootrootdependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/strcase/strcase.go0000644000000000000000000000271715024302466026655 0ustar rootroot// Package strcase implements functions to convert a camelCase UTF-8 string into various cases. // // New delimiters will be inserted based on the following transitions: // - On any change from lowercase to uppercase letter. // - On any change from number to uppercase letter. package strcase import ( "strings" "unicode" ) // Delimited converts a string to delimited.lower.case, here using `.` as delimiter. func Delimited(s string, d rune) string { return convert(s, unicode.LowerCase, d) } // ScreamingDelimited converts a string to DELIMITED.UPPER.CASE, here using `.` as delimiter. func ScreamingDelimited(s string, d rune) string { return convert(s, unicode.UpperCase, d) } // Snake converts a string to snake_case. func Snake(s string) string { return Delimited(s, '_') } // ScreamingSnake converts a string to SCREAMING_SNAKE_CASE. func ScreamingSnake(s string) string { return ScreamingDelimited(s, '_') } // convert converts a camelCase UTF-8 string into various cases. // _case must be unicode.LowerCase or unicode.UpperCase. func convert(s string, _case int, d rune) string { if len(s) == 0 { return s } n := strings.Builder{} n.Grow(len(s) + 2) // Allow adding at least 2 delimiters without another allocation. var prevRune rune for i, r := range s { if i > 0 && unicode.IsUpper(r) && (unicode.IsNumber(prevRune) || unicode.IsLower(prevRune)) { n.WriteRune(d) } n.WriteRune(unicode.To(_case, r)) prevRune = r } return n.String() } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/strcase/strcase_test.go0000644000000000000000000000246215024302466027711 0ustar rootrootpackage strcase import ( "strings" "testing" ) var tests = [][]string{ {"", ""}, {"Test", "test"}, {"test", "test"}, {"testCase", "test_case"}, {"test_case", "test_case"}, {"TestCase", "test_case"}, {"Test_Case", "test_case"}, {"ID", "id"}, {"userID", "user_id"}, {"UserID", "user_id"}, {"ManyManyWords", "many_many_words"}, {"manyManyWords", "many_many_words"}, {"icinga2", "icinga2"}, {"Icinga2Version", "icinga2_version"}, {"k8sVersion", "k8s_version"}, {"1234", "1234"}, {"a1b2c3d4", "a1b2c3d4"}, {"with1234digits", "with1234digits"}, {"with1234Digits", "with1234_digits"}, {"IPv4", "ipv4"}, {"IPv4Address", "ipv4_address"}, {"caféCrème", "café_crème"}, {"0℃", "0℃"}, {"~0", "~0"}, {"icinga💯points", "icinga💯points"}, {"😃🙃😀", "😃🙃😀"}, {"こんにちは", "こんにちは"}, {"\xff\xfe\xfd", "���"}, {"\xff", "�"}, } func TestSnake(t *testing.T) { for _, test := range tests { s, expected := test[0], test[1] actual := Snake(s) if actual != expected { t.Errorf("%q: %q != %q", s, actual, expected) } } } func TestScreamingSnake(t *testing.T) { for _, test := range tests { s, expected := test[0], strings.ToUpper(test[1]) actual := ScreamingSnake(s) if actual != expected { t.Errorf("%q: %q != %q", s, actual, expected) } } } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/redis/0000755000000000000000000000000015024302466024315 5ustar rootrootdependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/redis/client_test.go0000644000000000000000000000335015024302466027162 0ustar rootrootpackage redis import ( "github.com/icinga/icinga-go-library/config" "github.com/icinga/icinga-go-library/logging" "github.com/stretchr/testify/require" 
"go.uber.org/zap/zaptest" "testing" ) func TestNewClientFromConfig_GetAddr(t *testing.T) { tests := []struct { name string conf *Config addr string }{ { name: "redis-simple", conf: &Config{ Host: "example.com", }, addr: "redis://example.com:6379", }, { name: "redis-custom-port", conf: &Config{ Host: "example.com", Port: 6380, }, addr: "redis://example.com:6380", }, { name: "redis-acl", conf: &Config{ Host: "example.com", Username: "user", Password: "pass", }, addr: "redis://user@example.com:6379", }, { name: "redis-custom-database", conf: &Config{ Host: "example.com", Database: 23, }, addr: "redis://example.com:6379/23", }, { name: "redis-tls", conf: &Config{ Host: "example.com", TlsOptions: config.TLS{Enable: true}, }, addr: "redis+tls://example.com:6379", }, { name: "redis-with-everything", conf: &Config{ Host: "example.com", Port: 6380, Username: "user", Password: "pass", Database: 23, TlsOptions: config.TLS{Enable: true}, }, addr: "redis+tls://user@example.com:6380/23", }, { name: "redis-unix-domain-socket", conf: &Config{ Host: "/var/empty/redis.sock", }, addr: "redis://(/var/empty/redis.sock)", }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { redis, err := NewClientFromConfig( test.conf, logging.NewLogger(zaptest.NewLogger(t).Sugar(), 0)) require.NoError(t, err) require.Equal(t, test.addr, redis.GetAddr()) }) } } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/redis/config.go0000644000000000000000000000431615024302466026115 0ustar rootrootpackage redis import ( "github.com/icinga/icinga-go-library/config" "github.com/pkg/errors" "time" ) // Options define user configurable Redis options. type Options struct { BlockTimeout time.Duration `yaml:"block_timeout" env:"BLOCK_TIMEOUT" default:"1s"` HMGetCount int `yaml:"hmget_count" env:"HMGET_COUNT" default:"4096"` HScanCount int `yaml:"hscan_count" env:"HSCAN_COUNT" default:"4096"` MaxHMGetConnections int `yaml:"max_hmget_connections" env:"MAX_HMGET_CONNECTIONS" default:"8"` Timeout time.Duration `yaml:"timeout" env:"TIMEOUT" default:"30s"` XReadCount int `yaml:"xread_count" env:"XREAD_COUNT" default:"4096"` } // Validate checks constraints in the supplied Redis options and returns an error if they are violated. func (o *Options) Validate() error { if o.BlockTimeout <= 0 { return errors.New("block_timeout must be positive") } if o.HMGetCount < 1 { return errors.New("hmget_count must be at least 1") } if o.HScanCount < 1 { return errors.New("hscan_count must be at least 1") } if o.MaxHMGetConnections < 1 { return errors.New("max_hmget_connections must be at least 1") } if o.Timeout == 0 { return errors.New("timeout cannot be 0. Configure a value greater than zero, or use -1 for no timeout") } if o.XReadCount < 1 { return errors.New("xread_count must be at least 1") } return nil } // Config defines Config client configuration. type Config struct { Host string `yaml:"host" env:"HOST"` Port int `yaml:"port" env:"PORT"` Username string `yaml:"username" env:"USERNAME"` Password string `yaml:"password" env:"PASSWORD,unset"` Database int `yaml:"database" env:"DATABASE" default:"0"` TlsOptions config.TLS `yaml:",inline"` Options Options `yaml:"options" envPrefix:"OPTIONS_"` } // Validate checks constraints in the supplied Config configuration and returns an error if they are violated. 
func (r *Config) Validate() error { if r.Host == "" { return errors.New("Redis host missing") } if r.Username != "" && r.Password == "" { return errors.New("Redis password must be set, if username is provided") } return r.Options.Validate() } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/redis/streams.go0000644000000000000000000000103515024302466026321 0ustar rootrootpackage redis // Streams represents a Redis stream key to ID mapping. type Streams map[string]string // Option returns the Redis stream key to ID mapping // as a slice of stream keys followed by their IDs // that is compatible for the Redis STREAMS option. func (s Streams) Option() []string { // len*2 because we're appending the IDs later. streams := make([]string, 0, len(s)*2) ids := make([]string, 0, len(s)) for key, id := range s { streams = append(streams, key) ids = append(ids, id) } return append(streams, ids...) } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/redis/streams_test.go0000644000000000000000000000113415024302466027360 0ustar rootrootpackage redis import ( "github.com/stretchr/testify/require" "testing" ) func TestStreams_Option(t *testing.T) { subtests := []struct { name string input Streams outputs [][]string }{ {"nil", nil, [][]string{{}}}, {"empty", Streams{}, [][]string{{}}}, {"one", Streams{"key": "id"}, [][]string{{"key", "id"}}}, {"two", Streams{"key1": "id1", "key2": "id2"}, [][]string{ {"key1", "key2", "id1", "id2"}, {"key2", "key1", "id2", "id1"}, }}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { require.Contains(t, st.outputs, st.input.Option()) }) } } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/redis/client.go0000644000000000000000000002365015024302466026130 0ustar rootrootpackage redis import ( "context" "crypto/tls" "fmt" "github.com/icinga/icinga-go-library/backoff" "github.com/icinga/icinga-go-library/com" "github.com/icinga/icinga-go-library/logging" "github.com/icinga/icinga-go-library/periodic" "github.com/icinga/icinga-go-library/retry" "github.com/icinga/icinga-go-library/utils" "github.com/pkg/errors" "github.com/redis/go-redis/v9" "go.uber.org/zap" "go.uber.org/zap/zapcore" "golang.org/x/sync/errgroup" "golang.org/x/sync/semaphore" "net" "sync/atomic" "time" ) // Client is a wrapper around redis.Client with // streaming and logging capabilities. type Client struct { *redis.Client Options *Options logger *logging.Logger } // NewClient returns a new Client wrapper for a pre-existing redis.Client. func NewClient(client *redis.Client, logger *logging.Logger, options *Options) *Client { return &Client{Client: client, logger: logger, Options: options} } // NewClientFromConfig returns a new Client from Config. 
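// A minimal caller-side sketch (error handling shortened; logger is assumed to be a
// *logging.Logger, e.g. constructed as in client_test.go; the host value is just an
// example):
//
//	cfg := &Config{Host: "localhost"}
//	client, err := NewClientFromConfig(cfg, logger)
//	if err != nil {
//		return err
//	}
//	// client.GetAddr() then reports something like "redis://localhost:6379",
//	// since the default port 6379 is filled in below when none is configured.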
func NewClientFromConfig(c *Config, logger *logging.Logger) (*Client, error) { tlsConfig, err := c.TlsOptions.MakeConfig(c.Host) if err != nil { return nil, err } var dialer ctxDialerFunc dl := &net.Dialer{Timeout: 15 * time.Second} if tlsConfig == nil { dialer = dl.DialContext } else { dialer = (&tls.Dialer{NetDialer: dl, Config: tlsConfig}).DialContext } options := &redis.Options{ Dialer: dialWithLogging(dialer, logger), Username: c.Username, Password: c.Password, DB: c.Database, ReadTimeout: c.Options.Timeout, TLSConfig: tlsConfig, } if utils.IsUnixAddr(c.Host) { options.Network = "unix" options.Addr = c.Host } else { port := c.Port if port == 0 { port = 6379 } options.Network = "tcp" options.Addr = net.JoinHostPort(c.Host, fmt.Sprint(port)) } client := redis.NewClient(options) options = client.Options() options.PoolSize = max(32, options.PoolSize) options.MaxRetries = options.PoolSize + 1 // https://github.com/go-redis/redis/issues/1737 return NewClient(redis.NewClient(options), logger, &c.Options), nil } // GetAddr returns a URI-like Redis connection string. // // It has the following syntax: // // redis[+tls]://user@host[:port]/database func (c *Client) GetAddr() string { description := "redis" if c.Client.Options().TLSConfig != nil { description += "+tls" } description += "://" if username := c.Client.Options().Username; username != "" { description += username + "@" } if utils.IsUnixAddr(c.Client.Options().Addr) { description += "(" + c.Client.Options().Addr + ")" } else { description += c.Client.Options().Addr } if db := c.Client.Options().DB; db != 0 { description += fmt.Sprintf("/%d", db) } return description } // MarshalLogObject implements [zapcore.ObjectMarshaler], adding the redis address [Client.GetAddr] to each log message. func (c *Client) MarshalLogObject(encoder zapcore.ObjectEncoder) error { encoder.AddString("redis_address", c.GetAddr()) return nil } // HPair defines Redis hashes field-value pairs. type HPair struct { Field string Value string } // HYield yields HPair field-value pairs for all fields in the hash stored at key. func (c *Client) HYield(ctx context.Context, key string) (<-chan HPair, <-chan error) { pairs := make(chan HPair, c.Options.HScanCount) return pairs, com.WaitAsync(com.WaiterFunc(func() error { var counter com.Counter defer c.log(ctx, key, &counter).Stop() defer close(pairs) seen := make(map[string]struct{}) var cursor uint64 var err error var page []string for { cmd := c.HScan(ctx, key, cursor, "", int64(c.Options.HScanCount)) page, cursor, err = cmd.Result() if err != nil { return WrapCmdErr(cmd) } for i := 0; i < len(page); i += 2 { if _, ok := seen[page[i]]; ok { // Ignore duplicate returned by HSCAN. continue } seen[page[i]] = struct{}{} select { case pairs <- HPair{ Field: page[i], Value: page[i+1], }: counter.Inc() case <-ctx.Done(): return ctx.Err() } } if cursor == 0 { break } } return nil })) } // HMYield yields HPair field-value pairs for the specified fields in the hash stored at key. func (c *Client) HMYield(ctx context.Context, key string, fields ...string) (<-chan HPair, <-chan error) { pairs := make(chan HPair) return pairs, com.WaitAsync(com.WaiterFunc(func() error { var counter com.Counter defer c.log(ctx, key, &counter).Stop() g, ctx := errgroup.WithContext(ctx) defer func() { // Wait until the group is done so that we can safely close the pairs channel, // because on error, sem.Acquire will return before calling g.Wait(), // which can result in goroutines working on a closed channel. 
_ = g.Wait() close(pairs) }() // Use context from group. batches := utils.BatchSliceOfStrings(ctx, fields, c.Options.HMGetCount) sem := semaphore.NewWeighted(int64(c.Options.MaxHMGetConnections)) for batch := range batches { if err := sem.Acquire(ctx, 1); err != nil { return errors.Wrap(err, "can't acquire semaphore") } batch := batch g.Go(func() error { defer sem.Release(1) cmd := c.HMGet(ctx, key, batch...) vals, err := cmd.Result() if err != nil { return WrapCmdErr(cmd) } for i, v := range vals { if v == nil { c.logger.Warnf("HMGET %s: field %#v missing", key, batch[i]) continue } vStr, ok := v.(string) if !ok { c.logger.Warnf("HMGET %s: field %#v is not a string", key, batch[i]) continue } select { case pairs <- HPair{ Field: batch[i], Value: vStr, }: counter.Inc() case <-ctx.Done(): return ctx.Err() } } return nil }) } return g.Wait() })) } // XReadUntilResult (repeatedly) calls XREAD with the specified arguments until a result is returned. // Each call blocks at most for the duration specified in Options.BlockTimeout until data // is available before it times out and the next call is made. // This also means that an already set block timeout is overridden. func (c *Client) XReadUntilResult(ctx context.Context, a *redis.XReadArgs) ([]redis.XStream, error) { a.Block = c.Options.BlockTimeout for { cmd := c.XRead(ctx, a) // Explicitly check for context errors because go-redis v9 does not respect context.Canceled or // context.DeadlineExceeded unless Options.ContextTimeoutEnabled is set [^1] [^2], which we do not enable. // If the context is canceled or times out during XRead and there is no data to read, // XRead will **still** block until the block timeout is reached and // return redis.Nil instead of the context error. Without this check, // the function would return redis.Nil, potentially leading to unexpected errors for consumers. // // [^1]: https://github.com/redis/go-redis/issues/2556 // [^2]: https://github.com/redis/go-redis/issues/2682 if ctx.Err() != nil { return nil, ctx.Err() } streams, err := cmd.Result() if err != nil { // We need to retry the XREAD commands in the following situations: // - If Go Redis returns redis.Nil, it means no data was read from Redis — e.g. when the keys don’t // exist yet, and we will need to retry the operation again. // // - To prevent surpassing Go Redis's internal maximum retries or any other I/O timeouts [^1], it's // important to set a block timeout greater than zero for the XREAD commands, see the "a.Block" above. // However, setting a block timeout means that Go Redis will not retry any errors internally and will // instead return an I/O timeout error when exceeding the timeout. Thus, we need to handle this here and // retry it again. // // [^1]: https://github.com/redis/go-redis/issues/2131 if errors.Is(err, redis.Nil) || retry.Retryable(err) { continue } return streams, WrapCmdErr(cmd) } return streams, nil } } func (c *Client) log(ctx context.Context, key string, counter *com.Counter) periodic.Stopper { return periodic.Start(ctx, c.logger.Interval(), func(tick periodic.Tick) { // We may never get to progress logging here, // as fetching should be completed before the interval expires, // but if it does, it is good to have this log message. 
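// counter.Reset returns the number of items handled since the previous tick and
// clears that running value (see com.Counter), so each progress line below only
// reports the delta for the last interval.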
if count := counter.Reset(); count > 0 { c.logger.Debugf("Fetched %d items from %s", count, key) } }, periodic.OnStop(func(tick periodic.Tick) { c.logger.Debugf("Finished fetching from %s with %d items in %s", key, counter.Total(), tick.Elapsed) })) } type ctxDialerFunc = func(ctx context.Context, network, addr string) (net.Conn, error) // dialWithLogging returns a Redis Dialer with logging capabilities. func dialWithLogging(dialer ctxDialerFunc, logger *logging.Logger) ctxDialerFunc { // hadConnection captures if at least one successful connection was made. Since this function is only called once // and the returned closure is used, it can be used to synchronize this state across all dialers. var hadConnection atomic.Bool // dial behaves like net.Dialer#DialContext, // but re-tries on common errors that are considered retryable. return func(ctx context.Context, network, addr string) (conn net.Conn, err error) { retryTimeout := retry.DefaultTimeout if hadConnection.Load() { retryTimeout = 0 } err = retry.WithBackoff( ctx, func(ctx context.Context) (err error) { conn, err = dialer(ctx, network, addr) return }, retry.Retryable, backoff.DefaultBackoff, retry.Settings{ Timeout: retryTimeout, OnRetryableError: func(elapsed time.Duration, attempt uint64, err, lastErr error) { logger.Warnw("Can't connect to Redis. Retrying", zap.Error(err), zap.Duration("after", elapsed), zap.Uint64("attempt", attempt)) }, OnSuccess: func(elapsed time.Duration, attempt uint64, _ error) { hadConnection.Store(true) if attempt > 1 { logger.Infow("Reconnected to Redis", zap.Duration("after", elapsed), zap.Uint64("attempts", attempt)) } }, }, ) err = errors.Wrap(err, "can't connect to Redis") return } } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/redis/alias.go0000644000000000000000000000060215024302466025733 0ustar rootrootpackage redis import "github.com/redis/go-redis/v9" // Alias definitions of commonly used go-redis exports, // so that only this redis package needs to be imported and not go-redis additionally. 
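// For example, a consumer that already holds a *Client (e.g. from NewClientFromConfig)
// can build XREAD arguments without importing go-redis directly; the stream name and ID
// below are placeholders, and the "keys followed by IDs" layout of Streams is the one
// described in streams.go:
//
//	args := &XReadArgs{Streams: []string{"some:stream", "0"}, Count: 100}
//	entries, err := client.XReadUntilResult(ctx, args)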
type IntCmd = redis.IntCmd type Pipeliner = redis.Pipeliner type XAddArgs = redis.XAddArgs type XMessage = redis.XMessage type XReadArgs = redis.XReadArgs var NewScript = redis.NewScript dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/redis/config_test.go0000644000000000000000000001442515024302466027156 0ustar rootrootpackage redis import ( "github.com/creasty/defaults" "github.com/icinga/icinga-go-library/config" "github.com/icinga/icinga-go-library/testutils" "github.com/stretchr/testify/require" "os" "testing" "time" ) func TestConfig(t *testing.T) { var defaultOptions Options require.NoError(t, defaults.Set(&defaultOptions), "setting default options") configTests := []testutils.TestCase[Config, testutils.ConfigTestData]{ { Name: "Redis host missing", Data: testutils.ConfigTestData{ Yaml: `host:`, }, Error: testutils.ErrorContains("Redis host missing"), }, { Name: "Minimal config", Data: testutils.ConfigTestData{ Yaml: `host: localhost`, Env: map[string]string{"HOST": "localhost"}, }, Expected: Config{ Host: "localhost", Options: defaultOptions, }, }, { Name: "Redis password must be set, if username is provided", Data: testutils.ConfigTestData{ Yaml: ` host: localhost username: username`, Env: map[string]string{ "HOST": "localhost", "USERNAME": "username", }, }, Error: testutils.ErrorContains("Redis password must be set, if username is provided"), }, { Name: "Customized config", Data: testutils.ConfigTestData{ Yaml: ` host: localhost username: username password: password database: 2`, Env: map[string]string{ "HOST": "localhost", "USERNAME": "username", "PASSWORD": "password", "DATABASE": "2", }, }, Expected: Config{ Host: "localhost", Username: "username", Password: "password", Database: 2, Options: defaultOptions, }, }, { Name: "TLS", Data: testutils.ConfigTestData{ Yaml: ` host: localhost tls: true cert: cert.pem key: key.pem ca: ca.pem`, Env: map[string]string{ "HOST": "localhost", "TLS": "1", "CERT": "cert.pem", "KEY": "key.pem", "CA": "ca.pem", }, }, Expected: Config{ Host: "localhost", Options: defaultOptions, TlsOptions: config.TLS{ Enable: true, Cert: "cert.pem", Key: "key.pem", Ca: "ca.pem", }, }, }, { Name: "block_timeout must be positive", Data: testutils.ConfigTestData{ Yaml: ` host: localhost options: block_timeout: -1s`, Env: map[string]string{ "HOST": "localhost", "OPTIONS_BLOCK_TIMEOUT": "-1s", }, }, Error: testutils.ErrorContains("block_timeout must be positive"), }, { Name: "hmget_count must be at least 1", Data: testutils.ConfigTestData{ Yaml: ` host: localhost options: hmget_count: 0`, Env: map[string]string{ "HOST": "localhost", "OPTIONS_HMGET_COUNT": "0", }, }, Error: testutils.ErrorContains("hmget_count must be at least 1"), }, { Name: "hscan_count must be at least 1", Data: testutils.ConfigTestData{ Yaml: ` host: localhost options: hscan_count: 0`, Env: map[string]string{ "HOST": "localhost", "OPTIONS_HSCAN_COUNT": "0", }, }, Error: testutils.ErrorContains("hscan_count must be at least 1"), }, { Name: "max_hmget_connections must be at least 1", Data: testutils.ConfigTestData{ Yaml: ` host: localhost options: max_hmget_connections: 0`, Env: map[string]string{ "HOST": "localhost", "OPTIONS_MAX_HMGET_CONNECTIONS": "0", }, }, Error: testutils.ErrorContains("max_hmget_connections must be at least 1"), }, { Name: "timeout cannot be 0", Data: testutils.ConfigTestData{ Yaml: ` host: localhost options: timeout: 0s`, Env: map[string]string{ "HOST": "localhost", "OPTIONS_TIMEOUT": "0s", }, }, Error: testutils.ErrorContains("timeout cannot be 0. 
Configure a value greater than zero, or use -1 for no timeout"), }, { Name: "xread_count must be at least 1", Data: testutils.ConfigTestData{ Yaml: ` host: localhost options: xread_count: 0`, Env: map[string]string{ "HOST": "localhost", "OPTIONS_XREAD_COUNT": "0", }, }, Error: testutils.ErrorContains("xread_count must be at least 1"), }, { Name: "Options retain defaults", Data: testutils.ConfigTestData{ Yaml: ` host: localhost options: block_timeout: 2s hmget_count: 512`, Env: map[string]string{ "HOST": "localhost", "OPTIONS_BLOCK_TIMEOUT": "2s", "OPTIONS_HMGET_COUNT": "512", }, }, Expected: Config{ Host: "localhost", Options: Options{ BlockTimeout: 2 * time.Second, HMGetCount: 512, HScanCount: defaultOptions.HScanCount, MaxHMGetConnections: defaultOptions.MaxHMGetConnections, Timeout: defaultOptions.Timeout, XReadCount: defaultOptions.XReadCount, }, }, }, { Name: "Options", Data: testutils.ConfigTestData{ Yaml: ` host: localhost options: block_timeout: 2s hmget_count: 512 hscan_count: 1024 max_hmget_connections: 16 timeout: 60s xread_count: 2048`, Env: map[string]string{ "HOST": "localhost", "OPTIONS_BLOCK_TIMEOUT": "2s", "OPTIONS_HMGET_COUNT": "512", "OPTIONS_HSCAN_COUNT": "1024", "OPTIONS_MAX_HMGET_CONNECTIONS": "16", "OPTIONS_TIMEOUT": "60s", "OPTIONS_XREAD_COUNT": "2048", }, }, Expected: Config{ Host: "localhost", Options: Options{ BlockTimeout: 2 * time.Second, HMGetCount: 512, HScanCount: 1024, MaxHMGetConnections: 16, Timeout: 60 * time.Second, XReadCount: 2048, }, }, }, } t.Run("FromEnv", func(t *testing.T) { for _, tc := range configTests { t.Run(tc.Name, tc.F(func(data testutils.ConfigTestData) (Config, error) { var actual Config err := config.FromEnv(&actual, config.EnvOptions{Environment: data.Env}) return actual, err })) } }) t.Run("FromYAMLFile", func(t *testing.T) { for _, tc := range configTests { t.Run(tc.Name+"/FromYAMLFile", tc.F(func(data testutils.ConfigTestData) (Config, error) { var actual Config var err error testutils.WithYAMLFile(t, data.Yaml, func(file *os.File) { err = config.FromYAMLFile(file.Name(), &actual) }) return actual, err })) } }) } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/redis/utils.go0000644000000000000000000000101415024302466026000 0ustar rootrootpackage redis import ( "context" "github.com/icinga/icinga-go-library/utils" "github.com/pkg/errors" "github.com/redis/go-redis/v9" ) // WrapCmdErr adds the command itself and // the stack of the current goroutine to the command's error if any. func WrapCmdErr(cmd redis.Cmder) error { err := cmd.Err() if err != nil { err = errors.Wrapf(err, "can't perform %q", utils.Ellipsize( redis.NewCmd(context.Background(), cmd.Args()).String(), // Omits error in opposite to cmd.String() 100, )) } return err } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/periodic/0000755000000000000000000000000015024302466025005 5ustar rootrootdependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/periodic/periodic.go0000644000000000000000000000461415024302466027137 0ustar rootrootpackage periodic import ( "context" "sync" "time" ) // Option configures Start. type Option interface { apply(*periodic) } // Stopper implements the Stop method, // which stops a periodic task from Start(). type Stopper interface { Stop() // Stops a periodic task. } // Tick is the value for periodic task callbacks that // contains the time of the tick and // the time elapsed since the start of the periodic task. 
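// From a caller's point of view, a periodic progress logger could be set up roughly
// like this (the interval and the logger calls are illustrative, not part of this
// package; the pattern mirrors the usage in the redis client above):
//
//	stop := periodic.Start(ctx, time.Minute, func(tick periodic.Tick) {
//		logger.Debugf("still busy after %s", tick.Elapsed)
//	}, periodic.OnStop(func(tick periodic.Tick) {
//		logger.Debugf("done after %s", tick.Elapsed)
//	}))
//	defer stop.Stop()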
type Tick struct { Elapsed time.Duration Time time.Time } // Immediate starts the periodic task immediately instead of after the first tick. func Immediate() Option { return optionFunc(func(p *periodic) { p.immediate = true }) } // OnStop configures a callback that is executed when a periodic task is stopped or canceled. func OnStop(f func(Tick)) Option { return optionFunc(func(p *periodic) { p.onStop = f }) } // Start starts a periodic task with a ticker at the specified interval, // which executes the given callback after each tick. // Pending tasks do not overlap, but could start immediately if // the previous task(s) takes longer than the interval. // Call Stop() on the return value in order to stop the ticker and to release associated resources. // The interval must be greater than zero. func Start(ctx context.Context, interval time.Duration, callback func(Tick), options ...Option) Stopper { t := &periodic{ interval: interval, callback: callback, } for _, option := range options { option.apply(t) } ctx, cancelCtx := context.WithCancel(ctx) start := time.Now() go func() { done := false if !t.immediate { select { case <-time.After(interval): case <-ctx.Done(): done = true } } if !done { ticker := time.NewTicker(t.interval) defer ticker.Stop() for tickTime := time.Now(); !done; { t.callback(Tick{ Elapsed: tickTime.Sub(start), Time: tickTime, }) select { case tickTime = <-ticker.C: case <-ctx.Done(): done = true } } } if t.onStop != nil { now := time.Now() t.onStop(Tick{ Elapsed: now.Sub(start), Time: now, }) } }() return stoperFunc(func() { t.stop.Do(cancelCtx) }) } type optionFunc func(*periodic) func (f optionFunc) apply(p *periodic) { f(p) } type stoperFunc func() func (f stoperFunc) Stop() { f() } type periodic struct { interval time.Duration callback func(Tick) immediate bool stop sync.Once onStop func(Tick) } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/go.sum0000644000000000000000000001617115024302466024350 0ustar rootrootfilippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= github.com/caarlos0/env/v11 v11.3.1 h1:cArPWC15hWmEt+gWk7YBi7lEXTXCvpaSdCiZE2X5mCA= github.com/caarlos0/env/v11 v11.3.1/go.mod h1:qupehSf/Y0TUTsxKywqRt/vJjN5nz6vauiYEUUr8P4U= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/creasty/defaults v1.8.0 h1:z27FJxCAa0JKt3utc0sCImAEb+spPucmKoOdLHvHYKk= github.com/creasty/defaults v1.8.0/go.mod h1:iGzKe6pbEHnpMPtfDXZEr0NVxWnPTjb1bbDy08fPzYM= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod 
h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.22.1 h1:40JcKH+bBNGFczGuoBYgX4I6m/i27HYW8P9FDk5PbgA= github.com/go-playground/validator/v10 v10.22.1/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo= github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= github.com/goccy/go-yaml v1.13.0 h1:0Wtp0FZLd7Sm8gERmR9S6Iczzb3vItJj7NaHmFg8pTs= github.com/goccy/go-yaml v1.13.0/go.mod h1:IjYwxUiJDoqpx2RmbdjMUceGHZwYLon3sfOGl5Hi9lc= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/jessevdk/go-flags v1.6.1 h1:Cvu5U8UGrLay1rZfv/zP7iLpSHGUZ/Ou68T0iX1bBK4= github.com/jessevdk/go-flags v1.6.1/go.mod h1:Mk8T1hIAWpOiJiHa9rJASDK2UGWji0EuPGBnNLMooyc= github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/redis/go-redis/v9 v9.10.0 h1:FxwK3eV8p/CQa0Ch276C7u2d0eNC9kCmAYQ7mCXCzVs= github.com/redis/go-redis/v9 v9.10.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw= github.com/ssgreg/journald v1.0.0 h1:0YmTDPJXxcWDPba12qNMdO6TxvfkFSYpFIJ31CwmLcU= github.com/ssgreg/journald v1.0.0/go.mod 
h1:RUckwmTM8ghGWPslq2+ZBZzbb9/2KgjzYZ4JEP+oRt0= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/version/0000755000000000000000000000000015024302466024674 5ustar rootrootdependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/version/version.go0000644000000000000000000001126715024302466026717 0ustar rootrootpackage version import ( "bufio" "errors" "fmt" "os" "runtime" "runtime/debug" "strconv" "strings" ) type VersionInfo struct { Version string Commit string } // Version determines version and commit information based on multiple data sources: // - Version information dynamically added by `git archive` in the remaining to parameters. // - A hardcoded version number passed as first parameter. // - Commit information added to the binary by `go build`. // // It's supposed to be called like this in combination with setting the `export-subst` attribute for the corresponding // file in .gitattributes: // // var Version = version.Version("1.0.0-rc2", "$Format:%(describe)$", "$Format:%H$") // // When exported using `git archive`, the placeholders are replaced in the file and this version information is // preferred. Otherwise the hardcoded version is used and augmented with commit information from the build metadata. 
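// When commit information is available from the build metadata, the reported version
// therefore ends up looking like, for example, 1.0.0-rc2-g1234567 (with an additional
// -dirty suffix for modified trees); the hash is truncated to the same seven characters
// that git describe would use.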
func Version(version, gitDescribe, gitHash string) *VersionInfo { const hashLen = 7 // Same truncation length for the commit hash as used by git describe. if !strings.HasPrefix(gitDescribe, "$") && !strings.HasPrefix(gitHash, "$") { if strings.HasPrefix(gitDescribe, "%") { // Only Git 2.32+ supports %(describe), older versions don't expand it but keep it as-is. // Fall back to the hardcoded version augmented with the commit hash. gitDescribe = version if len(gitHash) >= hashLen { gitDescribe += "-g" + gitHash[:hashLen] } } return &VersionInfo{ Version: gitDescribe, Commit: gitHash, } } else { commit := "" if info, ok := debug.ReadBuildInfo(); ok { modified := false for _, setting := range info.Settings { switch setting.Key { case "vcs.revision": commit = setting.Value case "vcs.modified": modified, _ = strconv.ParseBool(setting.Value) } } if len(commit) >= hashLen { version += "-g" + commit[:hashLen] if modified { version += "-dirty" commit += " (modified)" } } } return &VersionInfo{ Version: version, Commit: commit, } } } // Print writes verbose version output to stdout. func (v *VersionInfo) Print(projectName string) { fmt.Printf("%s version: %s\n", projectName, v.Version) fmt.Println() fmt.Println("Build information:") fmt.Printf(" Go version: %s (%s, %s)\n", runtime.Version(), runtime.GOOS, runtime.GOARCH) if v.Commit != "" { fmt.Println(" Git commit:", v.Commit) } if r, err := readOsRelease(); err == nil { fmt.Println() fmt.Println("System information:") fmt.Println(" Platform:", r.Name) fmt.Println(" Platform version:", r.DisplayVersion()) } } // osRelease contains the information obtained from the os-release file. type osRelease struct { Name string Version string VersionId string BuildId string } // DisplayVersion returns the most suitable version information for display purposes. func (o *osRelease) DisplayVersion() string { if o.Version != "" { // Most distributions set VERSION return o.Version } else if o.VersionId != "" { // Some only set VERSION_ID (Alpine Linux for example) return o.VersionId } else if o.BuildId != "" { // Others only set BUILD_ID (Arch Linux for example) return o.BuildId } else { return "(unknown)" } } // readOsRelease reads and parses the os-release file. func readOsRelease() (*osRelease, error) { for _, path := range []string{"/etc/os-release", "/usr/lib/os-release"} { f, err := os.Open(path) // #nosec G304 -- Potential file inclusion via variable - Hard-coded files, so not affected by this issue. if err != nil { if os.IsNotExist(err) { continue // Try next path. } else { return nil, err } } o := &osRelease{ Name: "Linux", // Suggested default as per os-release(5) man page. } scanner := bufio.NewScanner(f) for scanner.Scan() { line := scanner.Text() if strings.HasPrefix(line, "#") { continue // Ignore comment. } parts := strings.SplitN(line, "=", 2) if len(parts) != 2 { continue // Ignore empty or possibly malformed line. } key := parts[0] val := parts[1] // Unquote strings. This isn't fully compliant with the specification which allows using some shell escape // sequences. However, typically quotes are only used to allow whitespace within the value. 
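// For example, a line such as VERSION="12 (bookworm)" yields the value 12 (bookworm)
// with the surrounding quotes stripped (the concrete distribution string is only an
// illustration).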
if len(val) >= 2 && (val[0] == '"' || val[0] == '\'') && val[0] == val[len(val)-1] { val = val[1 : len(val)-1] } switch key { case "NAME": o.Name = val case "VERSION": o.Version = val case "VERSION_ID": o.VersionId = val case "BUILD_ID": o.BuildId = val } } if err := scanner.Err(); err != nil { return nil, err } return o, nil } return nil, errors.New("os-release file not found") } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/com/0000755000000000000000000000000015024302466023765 5ustar rootrootdependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/com/counter.go0000644000000000000000000000156515024302466026002 0ustar rootrootpackage com import ( "sync" "sync/atomic" ) // Counter implements an atomic counter. type Counter struct { value uint64 mu sync.Mutex // Protects total. total uint64 } // Add adds the given delta to the counter. func (c *Counter) Add(delta uint64) { atomic.AddUint64(&c.value, delta) } // Inc increments the counter by one. func (c *Counter) Inc() { c.Add(1) } // Reset resets the counter to 0 and returns its previous value. // Does not reset the total value returned from Total. func (c *Counter) Reset() uint64 { c.mu.Lock() defer c.mu.Unlock() v := atomic.SwapUint64(&c.value, 0) c.total += v return v } // Total returns the total counter value. func (c *Counter) Total() uint64 { c.mu.Lock() defer c.mu.Unlock() return c.total + c.Val() } // Val returns the current counter value. func (c *Counter) Val() uint64 { return atomic.LoadUint64(&c.value) } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/com/cond.go0000644000000000000000000000437415024302466025247 0ustar rootrootpackage com import ( "context" "github.com/pkg/errors" ) // Cond implements a channel-based synchronization for goroutines that wait for signals or send them. // Internally based on a controller loop that handles the synchronization of new listeners and signal propagation, // which is only started when NewCond is called. Thus the zero value cannot be used. type Cond struct { broadcast chan struct{} done chan struct{} cancel context.CancelFunc listeners chan chan struct{} } // NewCond returns a new Cond and starts the controller loop. func NewCond(ctx context.Context) *Cond { ctx, cancel := context.WithCancel(ctx) c := &Cond{ broadcast: make(chan struct{}), cancel: cancel, done: make(chan struct{}), listeners: make(chan chan struct{}), } go c.controller(ctx) return c } // Broadcast sends a signal to all current listeners by closing the previously returned channel from Wait. // Panics if the controller loop has already ended. func (c *Cond) Broadcast() { select { case c.broadcast <- struct{}{}: case <-c.done: panic(errors.New("condition closed")) } } // Close stops the controller loop, waits for it to finish, and returns a nil error. // Implements the io.Closer interface, hence that return type is required. func (c *Cond) Close() error { c.cancel() <-c.done return nil } // Done returns a channel that will be closed when the controller loop has ended. func (c *Cond) Done() <-chan struct{} { return c.done } // Wait returns a channel that is closed with the next signal. // Panics if the controller loop has already ended. func (c *Cond) Wait() <-chan struct{} { select { case l := <-c.listeners: return l case <-c.done: panic(errors.New("condition closed")) } } // controller loop. func (c *Cond) controller(ctx context.Context) { defer close(c.done) // Note that the notify channel does not close when the controller loop ends // in order not to notify pending listeners. 
notify := make(chan struct{}) for { select { case <-c.broadcast: // Close channel to notify all current listeners. close(notify) // Create a new channel for the next listeners. notify = make(chan struct{}) case c.listeners <- notify: // A new listener received the channel. case <-ctx.Done(): return } } } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/com/cond_test.go0000644000000000000000000000336715024302466026307 0ustar rootrootpackage com import ( "context" "github.com/stretchr/testify/require" "testing" "time" ) func TestCond_Broadcast(t *testing.T) { cond := NewCond(context.Background()) defer func() { _ = cond.Close() }() done := cond.Done() wait := cond.Wait() select { case <-done: require.Fail(t, "cond should not be closed, yet") case <-wait: require.Fail(t, "cond should not be ready, yet") case <-time.After(time.Second / 10): } cond.Broadcast() select { case <-done: require.Fail(t, "cond should still not be closed") case <-cond.Done(): require.Fail(t, "cond should not be closed for round 2, yet") case <-cond.Wait(): require.Fail(t, "cond should not be ready for round 2") case <-time.After(time.Second / 10): } select { case _, ok := <-wait: if ok { require.Fail(t, "cond ready channel should be closed") } case <-time.After(time.Second / 10): require.Fail(t, "cond should be ready") } } func TestCond_Close(t *testing.T) { cond := NewCond(context.Background()) done := cond.Done() wait := cond.Wait() require.NoError(t, cond.Close()) select { case _, ok := <-done: if ok { require.Fail(t, "existing cond-closed channel should be closed") } case <-time.After(time.Second / 10): require.Fail(t, "cond should be closed") } select { case _, ok := <-cond.Done(): if ok { require.Fail(t, "new cond-closed channel should be closed") } case <-time.After(time.Second / 10): require.Fail(t, "cond should be still closed") } select { case <-wait: require.Fail(t, "cond should not be ready") case <-time.After(time.Second / 10): } require.Panics(t, func() { cond.Wait() }, "cond should panic on Wait after Close") require.Panics(t, func() { cond.Broadcast() }, "cond should panic on Broadcast after Close") } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/com/com_test.go0000644000000000000000000001077415024302466026142 0ustar rootrootpackage com import ( "context" "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" "io" "testing" "time" ) func TestWaitAsync(t *testing.T) { subtests := []struct { name string input WaiterFunc error error }{ {"no_error", func() error { return nil }, nil}, {"error", func() error { return io.EOF }, io.EOF}, {"sleep_no_error", func() error { time.Sleep(time.Second / 2); return nil }, nil}, {"sleep_error", func() error { time.Sleep(time.Second / 2); return io.EOF }, io.EOF}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { errs := WaitAsync(st.input) require.NotNil(t, errs) if st.error != nil { select { case e, ok := <-errs: if !ok { require.Fail(t, "channel should not be closed, yet") } require.Equal(t, st.error, e) case <-time.After(time.Second): require.Fail(t, "channel should not block") } } select { case _, ok := <-errs: if ok { require.Fail(t, "channel should be closed") } case <-time.After(time.Second): require.Fail(t, "channel should not block") } }) } } func TestErrgroupReceive(t *testing.T) { subtests := []struct { name string input []error error bool }{ {"nothing", nil, false}, {"nil", []error{nil}, false}, {"non-nil", []error{io.EOF}, true}, } latencies := []struct { name string latency time.Duration }{ 
{"instantly", 0}, {"1us", time.Microsecond}, {"20ms", 20 * time.Millisecond}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { for _, l := range latencies { t.Run(l.name, func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() gCtx, gCancel := context.WithCancel(context.Background()) gCancel() g, _ := errgroup.WithContext(gCtx) errs := make(chan error) go func() { defer close(errs) for _, e := range st.input { if l.latency > 0 { select { case <-time.After(l.latency): case <-ctx.Done(): return } } select { case errs <- e: case <-ctx.Done(): return } } }() ErrgroupReceive(g, errs) if err := g.Wait(); st.error { require.Error(t, err) } else { require.NoError(t, err) } }) } }) } } func TestCopyFirst(t *testing.T) { subtests := []struct { name string io []string error bool }{ {"empty", nil, true}, {"one", []string{"a"}, false}, {"two", []string{"a", "b"}, false}, {"three", []string{"a", "b", "c"}, false}, } latencies := []struct { name string latency time.Duration }{ {"instantly", 0}, {"1us", time.Microsecond}, {"20ms", 20 * time.Millisecond}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { for _, l := range latencies { t.Run(l.name, func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ch := make(chan string) go func() { defer close(ch) for _, v := range st.io { if l.latency > 0 { select { case <-time.After(l.latency): case <-ctx.Done(): return } } select { case ch <- v: case <-ctx.Done(): return } } }() first, forward, err := CopyFirst(ctx, ch) if st.error { require.Error(t, err) require.Nil(t, forward, "forward should be nil") return } require.NoError(t, err) require.NotNil(t, forward, "forward should not be nil") expected := "" if len(st.io) > 0 { expected = st.io[0] } require.Equal(t, expected, first, "first should be the first element") for _, expected := range st.io { select { case actual, ok := <-forward: if !ok { require.Fail(t, "channel should not be closed") } require.Equal(t, expected, actual, "forwarded element should match") case <-time.After(time.Second): require.Fail(t, "channel should not block") } } select { case _, ok := <-forward: if ok { require.Fail(t, "channel should be closed") } case <-time.After(time.Second): require.Fail(t, "channel should not block") } }) } }) } t.Run("cancel-ctx", func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() first, forward, err := CopyFirst(ctx, make(chan int)) require.Error(t, err) require.Nil(t, forward) require.Empty(t, first) }) } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/com/com.go0000644000000000000000000000372715024302466025103 0ustar rootrootpackage com import ( "context" "github.com/pkg/errors" "golang.org/x/sync/errgroup" ) // Waiter implements the Wait method, // which blocks until execution is complete. type Waiter interface { Wait() error // Wait waits for execution to complete. } // The WaiterFunc type is an adapter to allow the use of ordinary functions as Waiter. // If f is a function with the appropriate signature, WaiterFunc(f) is a Waiter that calls f. type WaiterFunc func() error // Wait implements the Waiter interface. func (f WaiterFunc) Wait() error { return f() } // WaitAsync calls Wait() on the passed Waiter in a new goroutine and // sends the first non-nil error (if any) to the returned channel. // The returned channel is always closed when the Waiter is done. 
func WaitAsync(w Waiter) <-chan error { errs := make(chan error, 1) go func() { defer close(errs) if e := w.Wait(); e != nil { errs <- e } }() return errs } // ErrgroupReceive adds a goroutine to the specified group that // returns the first non-nil error (if any) from the specified channel. // If the channel is closed, it will return nil. func ErrgroupReceive(g *errgroup.Group, err <-chan error) { g.Go(func() error { return <-err }) } // CopyFirst asynchronously forwards all items from input to forward and synchronously returns the first item. func CopyFirst[T any]( ctx context.Context, input <-chan T, ) (first T, forward <-chan T, err error) { var ok bool select { case <-ctx.Done(): var zero T return zero, nil, ctx.Err() case first, ok = <-input: } if !ok { err = errors.New("can't copy from closed channel") return } // Buffer of one because we receive an entity and send it back immediately. fwd := make(chan T, 1) fwd <- first forward = fwd go func() { defer close(fwd) for { select { case <-ctx.Done(): return case e, ok := <-input: if !ok { return } select { case <-ctx.Done(): return case fwd <- e: } } } }() return } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/com/bulker.go0000644000000000000000000000667315024302466025614 0ustar rootrootpackage com import ( "context" "golang.org/x/sync/errgroup" "sync" "time" ) // BulkChunkSplitPolicy is a state machine which tracks the items of a chunk a bulker assembles. // A call takes an item for the current chunk into account. // Output true indicates that the state machine was reset first and the bulker // shall finish the current chunk now (not e.g. once $size is reached) without the given item. type BulkChunkSplitPolicy[T any] func(T) bool type BulkChunkSplitPolicyFactory[T any] func() BulkChunkSplitPolicy[T] // NeverSplit returns a pseudo state machine which never demands splitting. func NeverSplit[T any]() BulkChunkSplitPolicy[T] { return neverSplit[T] } func neverSplit[T any](T) bool { return false } // Bulker reads all values from a channel and streams them in chunks into a Bulk channel. type Bulker[T any] struct { ch chan []T ctx context.Context mu sync.Mutex } // NewBulker returns a new Bulker and starts streaming. func NewBulker[T any]( ctx context.Context, ch <-chan T, count int, splitPolicyFactory BulkChunkSplitPolicyFactory[T], ) *Bulker[T] { b := &Bulker[T]{ ch: make(chan []T), ctx: ctx, mu: sync.Mutex{}, } go b.run(ch, count, splitPolicyFactory) return b } // Bulk returns the channel on which the bulks are delivered. func (b *Bulker[T]) Bulk() <-chan []T { return b.ch } func (b *Bulker[T]) run(ch <-chan T, count int, splitPolicyFactory BulkChunkSplitPolicyFactory[T]) { defer close(b.ch) bufCh := make(chan T, count) splitPolicy := splitPolicyFactory() g, ctx := errgroup.WithContext(b.ctx) g.Go(func() error { defer close(bufCh) for { select { case v, ok := <-ch: if !ok { return nil } bufCh <- v case <-ctx.Done(): return ctx.Err() } } }) g.Go(func() error { for done := false; !done; { buf := make([]T, 0, count) timeout := time.After(256 * time.Millisecond) for drain := true; drain && len(buf) < count; { select { case v, ok := <-bufCh: if !ok { drain = false done = true break } if splitPolicy(v) { if len(buf) > 0 { b.ch <- buf buf = make([]T, 0, count) } timeout = time.After(256 * time.Millisecond) } buf = append(buf, v) case <-timeout: drain = false case <-ctx.Done(): return ctx.Err() } } if len(buf) > 0 { b.ch <- buf } splitPolicy = splitPolicyFactory() } return nil }) // We don't expect an error here. 
// We only use errgroup for the encapsulated use of sync.WaitGroup. _ = g.Wait() } // Bulk reads all values from a channel and streams them in chunks into a returned channel. func Bulk[T any]( ctx context.Context, ch <-chan T, count int, splitPolicyFactory BulkChunkSplitPolicyFactory[T], ) <-chan []T { if count <= 1 { return oneBulk(ctx, ch) } return NewBulker(ctx, ch, count, splitPolicyFactory).Bulk() } // oneBulk operates just as NewBulker(ctx, ch, 1, splitPolicy).Bulk(), // but without the overhead of the actual bulk creation with a buffer channel, timeout and BulkChunkSplitPolicy. func oneBulk[T any](ctx context.Context, ch <-chan T) <-chan []T { out := make(chan []T) go func() { defer close(out) for { select { case item, ok := <-ch: if !ok { return } select { case out <- []T{item}: case <-ctx.Done(): return } case <-ctx.Done(): return } } }() return out } var ( _ BulkChunkSplitPolicyFactory[struct{}] = NeverSplit[struct{}] ) dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/com/bulker_test.go0000644000000000000000000000723215024302466026643 0ustar rootrootpackage com import ( "context" "github.com/stretchr/testify/require" "testing" "time" ) func TestBulk(t *testing.T) { noSp := NeverSplit[string] var closeContext []string = nil subtests := []struct { name string input [][]string count int spf BulkChunkSplitPolicyFactory[string] output [][]string }{ {"empty", nil, 1, noSp, nil}, {"negative", [][]string{{"a"}}, -1, noSp, [][]string{{"a"}}}, {"a0", [][]string{{"a"}}, 0, noSp, [][]string{{"a"}}}, {"a1", [][]string{{"a"}}, 1, noSp, [][]string{{"a"}}}, {"a2", [][]string{{"a"}}, 2, noSp, [][]string{{"a"}}}, {"ab1", [][]string{{"a", "b"}}, 1, noSp, [][]string{{"a"}, {"b"}}}, {"ab2", [][]string{{"a", "b"}}, 2, noSp, [][]string{{"a", "b"}}}, {"ab3", [][]string{{"a", "b"}}, 3, noSp, [][]string{{"a", "b"}}}, {"abc1", [][]string{{"a", "b", "c"}}, 1, noSp, [][]string{{"a"}, {"b"}, {"c"}}}, {"abc2", [][]string{{"a", "b", "c"}}, 2, noSp, [][]string{{"a", "b"}, {"c"}}}, {"abc3", [][]string{{"a", "b", "c"}}, 3, noSp, [][]string{{"a", "b", "c"}}}, {"abc4", [][]string{{"a", "b", "c"}}, 4, noSp, [][]string{{"a", "b", "c"}}}, { "chunks_by_timeout", [][]string{{"a", "b", "c", "d"}, {"e", "f", "g"}, {"h", "i"}, {"j"}}, 5, noSp, [][]string{{"a", "b", "c", "d"}, {"e", "f", "g"}, {"h", "i"}, {"j"}}, }, {"chunks_by_spf", [][]string{{"a", "b", "c", "d", "e", "f", "g"}}, 2, func() BulkChunkSplitPolicy[string] { return func(string) bool { return true } }, [][]string{{"a"}, {"b"}, {"c"}, {"d"}, {"e"}, {"f"}, {"g"}}}, {"close-ctx_a1", [][]string{closeContext, {"a"}}, 1, noSp, nil}, {"close-ctx_a4", [][]string{closeContext, {"a"}}, 4, noSp, nil}, {"a_close-ctx_b1", [][]string{{"a"}, closeContext, {"b"}}, 1, noSp, [][]string{{"a"}}}, {"a_close-ctx_b4", [][]string{{"a"}, closeContext, {"b"}}, 4, noSp, [][]string{{"a"}}}, {"ab_close-ctx_c1", [][]string{{"a", "b"}, closeContext, {"c"}}, 1, noSp, [][]string{{"a"}, {"b"}}}, {"ab_close-ctx_c4", [][]string{{"a", "b"}, closeContext, {"c"}}, 4, noSp, [][]string{{"a", "b"}}}, } latencies := []struct { name string latency time.Duration }{ {"instantly", 0}, {"1us", time.Microsecond}, {"20ms", 20 * time.Millisecond}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { for _, l := range latencies { t.Run(l.name, func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() bulkCtx, cancelBulk := context.WithCancel(ctx) ch := make(chan string, 1) go func() { defer close(ch) for i, chunk := range st.input { if i > 0 { select 
{ case <-time.After(time.Second / 2): case <-ctx.Done(): return } } if chunk == nil { cancelBulk() } for _, v := range chunk { if l.latency > 0 { select { case <-time.After(l.latency): case <-ctx.Done(): return } } select { case ch <- v: case <-ctx.Done(): return } } } }() output := Bulk(bulkCtx, ch, st.count, st.spf) require.NotNil(t, output) for _, expected := range st.output { select { case actual, ok := <-output: if !ok { require.Fail(t, "channel should not be closed, yet") } require.Equal(t, expected, actual) case <-time.After(time.Second): require.Fail(t, "channel should not block") } } select { case _, ok := <-output: if ok { require.Fail(t, "channel should be closed") } case <-time.After(time.Second): require.Fail(t, "channel should not block") } }) } }) } } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/com/counter_test.go0000644000000000000000000000143515024302466027035 0ustar rootrootpackage com import ( "github.com/stretchr/testify/require" "testing" ) func TestCounter_Add(t *testing.T) { var c Counter c.Add(42) require.Equal(t, uint64(42), c.Val(), "unexpected value") require.Equal(t, uint64(42), c.Total(), "unexpected total") c.Add(23) require.Equal(t, uint64(65), c.Val(), "unexpected new value") require.Equal(t, uint64(65), c.Total(), "unexpected new total") } func TestCounter_Reset(t *testing.T) { var c Counter c.Add(42) require.Equal(t, uint64(42), c.Reset(), "unexpected reset value") require.Equal(t, uint64(0), c.Val(), "unexpected value") require.Equal(t, uint64(42), c.Total(), "unexpected total") c.Add(23) require.Equal(t, uint64(23), c.Val(), "unexpected new value") require.Equal(t, uint64(65), c.Total(), "unexpected new total") } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/.github/0000755000000000000000000000000015024302466024547 5ustar rootrootdependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/.github/dependabot.yml0000644000000000000000000000030315024302466027373 0ustar rootrootversion: 2 updates: - package-ecosystem: gomod directory: / schedule: interval: daily - package-ecosystem: github-actions directory: / schedule: interval: daily dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/.github/workflows/0000755000000000000000000000000015024302466026604 5ustar rootrootdependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/.github/workflows/go.yml0000644000000000000000000000077715024302466027747 0ustar rootrootname: Go on: push: branches: [ main ] pull_request: { } permissions: contents: read checks: write jobs: go: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Set up Go uses: actions/setup-go@v5 with: go-version: stable cache: false - name: Lint uses: golangci/golangci-lint-action@v8 with: version: latest only-new-issues: true - name: Test run: go test -v ./... 
dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/.github/workflows/sql.yml0000644000000000000000000000425215024302466030131 0ustar rootrootname: SQL on: push: branches: - main pull_request: {} jobs: mysql: name: ${{ matrix.database.name }} runs-on: ubuntu-latest strategy: fail-fast: false matrix: database: - {name: MySQL 5.7, image: "mysql:5.7"} - {name: MySQL 8.0, image: "mysql:8.0"} - {name: MySQL latest, image: "mysql:latest"} - {name: MariaDB 10.1, image: "mariadb:10.1"} - {name: MariaDB 10.2, image: "mariadb:10.2"} - {name: MariaDB 10.3, image: "mariadb:10.3"} - {name: MariaDB 10.4, image: "mariadb:10.4"} - {name: MariaDB 10.5, image: "mariadb:10.5"} - {name: MariaDB 10.6, image: "mariadb:10.6"} - {name: MariaDB 10.7, image: "mariadb:10.7"} - {name: MariaDB 10.11, image: "mariadb:10.11"} - {name: MariaDB 11.0, image: "mariadb:11.0"} - {name: MariaDB latest, image: "mariadb:latest"} env: ICINGAGOLIBRARY_TESTS_DB_TYPE: mysql ICINGAGOLIBRARY_TESTS_DB: icinga_unittest ICINGAGOLIBRARY_TESTS_DB_USER: root ICINGAGOLIBRARY_TESTS_DB_PASSWORD: password ICINGAGOLIBRARY_TESTS_DB_HOST: 127.0.0.1 ICINGAGOLIBRARY_TESTS_DB_PORT: 3306 services: mysql: image: ${{ matrix.database.image }} env: MYSQL_ROOT_PASSWORD: ${{ env.ICINGAGOLIBRARY_TESTS_DB_PASSWORD }} MYSQL_DATABASE: ${{ env.ICINGAGOLIBRARY_TESTS_DB }} # Wait for the containers to become ready options: >- --health-cmd "${{ (startsWith(matrix.database.image, 'mysql:') || startsWith(matrix.database.image, 'mariadb:10')) && 'mysqladmin ping' || 'healthcheck.sh --connect --innodb_initialized' }}" --health-interval 10s --health-timeout 5s --health-retries 10 ports: - 3306:3306 steps: - name: Setup Go uses: actions/setup-go@v5 with: go-version: stable - name: Checkout code uses: actions/checkout@v4 - name: Download dependencies run: go get -v -t -d ./... - name: Run tests timeout-minutes: 10 run: go test -v -timeout 5m ./... dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/flatten/0000755000000000000000000000000015024302466024644 5ustar rootrootdependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/flatten/flatten.go0000644000000000000000000000202115024302466026623 0ustar rootrootpackage flatten import ( "fmt" "github.com/icinga/icinga-go-library/types" "strconv" ) // Flatten creates flat, one-dimensional maps from arbitrarily nested values, e.g. JSON. 
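// For instance (mirroring the cases in flatten_test.go), flattening
//
//	map[string]any{"a": []any{1, true}, "b": nil}
//
// with prefix "root" produces the entries root.a[0]="1", root.a[1]="true" and
// root.b="null".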
func Flatten(value interface{}, prefix string) map[string]types.String { var flatten func(string, interface{}) flattened := make(map[string]types.String) flatten = func(key string, value interface{}) { switch value := value.(type) { case map[string]interface{}: if len(value) == 0 { flattened[key] = types.String{} break } for k, v := range value { flatten(key+"."+k, v) } case []interface{}: if len(value) == 0 { flattened[key] = types.String{} break } for i, v := range value { flatten(key+"["+strconv.Itoa(i)+"]", v) } case nil: flattened[key] = types.MakeString("null") case float64: flattened[key] = types.MakeString(strconv.FormatFloat(value, 'f', -1, 64)) default: flattened[key] = types.MakeString(fmt.Sprintf("%v", value)) } } flatten(prefix, value) return flattened } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/flatten/flatten_test.go0000644000000000000000000000354015024302466027671 0ustar rootrootpackage flatten import ( "github.com/icinga/icinga-go-library/types" "github.com/stretchr/testify/assert" "testing" ) func TestFlatten(t *testing.T) { for _, st := range []struct { name string prefix string value any output map[string]types.String }{ {"nil", "a", nil, map[string]types.String{"a": types.MakeString("null")}}, {"bool", "b", true, map[string]types.String{"b": types.MakeString("true")}}, {"int", "c", 42, map[string]types.String{"c": types.MakeString("42")}}, {"float", "d", 77.7, map[string]types.String{"d": types.MakeString("77.7")}}, {"large_float", "e", 1e23, map[string]types.String{"e": types.MakeString("100000000000000000000000")}}, {"string", "f", "\x00", map[string]types.String{"f": types.MakeString("\x00")}}, {"nil_slice", "g", []any(nil), map[string]types.String{"g": {}}}, {"empty_slice", "h", []any{}, map[string]types.String{"h": {}}}, {"slice", "i", []any{nil}, map[string]types.String{"i[0]": types.MakeString("null")}}, {"nil_map", "j", map[string]any(nil), map[string]types.String{"j": {}}}, {"empty_map", "k", map[string]any{}, map[string]types.String{"k": {}}}, {"map", "l", map[string]any{" ": nil}, map[string]types.String{"l. ": types.MakeString("null")}}, {"map_with_slice", "m", map[string]any{"\t": []any{"ä", "ö", "ü"}, "ß": "s"}, map[string]types.String{ "m.\t[0]": types.MakeString("ä"), "m.\t[1]": types.MakeString("ö"), "m.\t[2]": types.MakeString("ü"), "m.ß": types.MakeString("s"), }}, {"slice_with_map", "n", []any{map[string]any{"ä": "a", "ö": "o", "ü": "u"}, "ß"}, map[string]types.String{ "n[0].ä": types.MakeString("a"), "n[0].ö": types.MakeString("o"), "n[0].ü": types.MakeString("u"), "n[1]": types.MakeString("ß"), }}, } { t.Run(st.name, func(t *testing.T) { assert.Equal(t, st.output, Flatten(st.value, st.prefix)) }) } } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/database/0000755000000000000000000000000015024302466024753 5ustar rootrootdependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/database/column_map.go0000644000000000000000000000402315024302466027433 0ustar rootrootpackage database import ( "database/sql/driver" "github.com/jmoiron/sqlx/reflectx" "reflect" "sync" ) // ColumnMap provides a cached mapping of structs exported fields to their database column names. type ColumnMap interface { // Columns returns database column names for a struct's exported fields in a cached manner. // Thus, the returned slice MUST NOT be modified directly. // By default, all exported struct fields are mapped to database column names using snake case notation. 
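// For example, an exported field named GroupName (illustrative) is mapped to the column
// group_name unless a db tag specifies a different name.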
// The - (hyphen) directive for the db tag can be used to exclude certain fields. Columns(any) []string } // NewColumnMap returns a new ColumnMap. func NewColumnMap(mapper *reflectx.Mapper) ColumnMap { return &columnMap{ cache: make(map[reflect.Type][]string), mapper: mapper, } } type columnMap struct { mutex sync.Mutex cache map[reflect.Type][]string mapper *reflectx.Mapper } func (m *columnMap) Columns(subject any) []string { m.mutex.Lock() defer m.mutex.Unlock() t, ok := subject.(reflect.Type) if !ok { t = reflect.TypeOf(subject) } columns, ok := m.cache[t] if !ok { columns = m.getColumns(t) m.cache[t] = columns } return columns } func (m *columnMap) getColumns(t reflect.Type) []string { fields := m.mapper.TypeMap(t).Names columns := make([]string, 0, len(fields)) FieldLoop: for _, f := range fields { // If one of the parent fields implements the driver.Valuer interface, the field can be ignored. for parent := f.Parent; parent != nil && parent.Zero.IsValid(); parent = parent.Parent { // Check for pointer types. if _, ok := reflect.New(parent.Field.Type).Interface().(driver.Valuer); ok { continue FieldLoop } // Check for non-pointer types. if _, ok := reflect.Zero(parent.Field.Type).Interface().(driver.Valuer); ok { continue FieldLoop } } columns = append(columns, f.Path) } // Shrink/reduce slice length and capacity: // For a three-index slice (slice[a:b:c]), the length of the returned slice is b-a and the capacity is c-a. return columns[0:len(columns):len(columns)] } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/database/config.go0000644000000000000000000000226215024302466026551 0ustar rootrootpackage database import ( "github.com/icinga/icinga-go-library/config" "github.com/pkg/errors" ) // Config defines database client configuration. type Config struct { Type string `yaml:"type" env:"TYPE" default:"mysql"` Host string `yaml:"host" env:"HOST"` Port int `yaml:"port" env:"PORT"` Database string `yaml:"database" env:"DATABASE"` User string `yaml:"user" env:"USER"` Password string `yaml:"password" env:"PASSWORD,unset"` TlsOptions config.TLS `yaml:",inline"` Options Options `yaml:"options" envPrefix:"OPTIONS_"` } // Validate checks constraints in the supplied database configuration and returns an error if they are violated. 
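// A minimal sketch of a valid configuration (values are illustrative); as in the tests,
// struct defaults have to be applied first so that Options passes its own validation:
//
//	c := &Config{Type: "pgsql", Host: "localhost", User: "icinga", Database: "icingadb"}
//	_ = defaults.Set(c)   // fill Options with its defaults
//	err := c.Validate()   // nil; an empty Host would instead yield "database host missing"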
func (c *Config) Validate() error { switch c.Type { case "mysql", "pgsql": default: return unknownDbType(c.Type) } if c.Host == "" { return errors.New("database host missing") } if c.User == "" { return errors.New("database user missing") } if c.Database == "" { return errors.New("database name missing") } return c.Options.Validate() } func unknownDbType(t string) error { return errors.Errorf(`unknown database type %q, must be one of: "mysql", "pgsql"`, t) } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/database/db_test.go0000644000000000000000000000514215024302466026730 0ustar rootrootpackage database import ( "github.com/icinga/icinga-go-library/config" "github.com/icinga/icinga-go-library/logging" "github.com/stretchr/testify/require" "go.uber.org/zap/zaptest" "testing" ) func TestNewDbFromConfig_GetAddr(t *testing.T) { tests := []struct { name string conf *Config addr string }{ { name: "mysql-simple", conf: &Config{ Type: "mysql", Host: "example.com", Database: "db", User: "user", }, addr: "mysql://user@example.com:3306/db", }, { name: "mysql-custom-port", conf: &Config{ Type: "mysql", Host: "example.com", Port: 1234, Database: "db", User: "user", }, addr: "mysql://user@example.com:1234/db", }, { name: "mysql-tls", conf: &Config{ Type: "mysql", Host: "example.com", Database: "db", User: "user", TlsOptions: config.TLS{Enable: true}, }, addr: "mysql+tls://user@example.com:3306/db", }, { name: "mysql-unix-domain-socket", conf: &Config{ Type: "mysql", Host: "/var/empty/mysql.sock", Database: "db", User: "user", }, addr: "mysql://user@(/var/empty/mysql.sock)/db", }, { name: "pgsql-simple", conf: &Config{ Type: "pgsql", Host: "example.com", Database: "db", User: "user", }, addr: "pgsql://user@example.com:5432/db", }, { name: "pgsql-custom-port", conf: &Config{ Type: "pgsql", Host: "example.com", Port: 1234, Database: "db", User: "user", }, addr: "pgsql://user@example.com:1234/db", }, { name: "pgsql-tls", conf: &Config{ Type: "pgsql", Host: "example.com", Database: "db", User: "user", TlsOptions: config.TLS{Enable: true}, }, addr: "pgsql+tls://user@example.com:5432/db", }, { name: "pgsql-unix-domain-socket", conf: &Config{ Type: "pgsql", Host: "/var/empty/pgsql", Database: "db", User: "user", }, addr: "pgsql://user@(/var/empty/pgsql/.s.PGSQL.5432)/db", }, { name: "pgsql-unix-domain-socket-custom-port", conf: &Config{ Type: "pgsql", Host: "/var/empty/pgsql", Port: 1234, Database: "db", User: "user", }, addr: "pgsql://user@(/var/empty/pgsql/.s.PGSQL.1234)/db", }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { db, err := NewDbFromConfig( test.conf, logging.NewLogger(zaptest.NewLogger(t).Sugar(), 0), RetryConnectorCallbacks{}) require.NoError(t, err) require.Equal(t, test.addr, db.GetAddr()) }) } } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/database/utils_test.go0000644000000000000000000001266015024302466027506 0ustar rootrootpackage database import ( "context" "database/sql/driver" "fmt" "github.com/creasty/defaults" "github.com/go-sql-driver/mysql" "github.com/icinga/icinga-go-library/logging" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/zap/zaptest" "os" "strconv" "strings" "testing" "time" ) func TestDatabaseUtils(t *testing.T) { t.Parallel() ctx := context.Background() db := GetTestDB(ctx, t, "ICINGAGOLIBRARY") t.Run("SetMySQLSessionVars", func(t *testing.T) { t.Parallel() if db.DriverName() != MySQL { t.Skipf("skipping set session vars test for %q driver", db.DriverName()) } 
setMysqlSessionVars(ctx, db, t) }) t.Run("InsertObtainID", func(t *testing.T) { t.Parallel() defer func() { _, err := db.ExecContext(ctx, "DROP TABLE IF EXISTS igl_test_insert_obtain") assert.NoError(t, err, "dropping test database table should not fail") }() var err error if db.DriverName() == PostgreSQL { _, err = db.ExecContext(ctx, "CREATE TABLE igl_test_insert_obtain (id SERIAL PRIMARY KEY, name VARCHAR(255))") } else { _, err = db.ExecContext(ctx, "CREATE TABLE igl_test_insert_obtain (id INT AUTO_INCREMENT PRIMARY KEY, name VARCHAR(255))") } require.NoError(t, err, "creating test database table should not fail") id, err := InsertObtainID(ctx, db, "INSERT INTO igl_test_insert_obtain (name) VALUES (:name)", map[string]any{"name": "test1"}) require.NoError(t, err, "inserting new row into test database table should not fail") assert.Equal(t, id, int64(1)) id, err = InsertObtainID(ctx, db, "INSERT INTO igl_test_insert_obtain (name) VALUES (:name)", map[string]any{"name": "test2"}) require.NoError(t, err, "inserting new row into test database table should not fail") assert.Equal(t, id, int64(2)) }) } func setMysqlSessionVars(ctx context.Context, db *DB, t *testing.T) { vars := map[string][]struct { name string value string expect error }{ "UnknownVariables": { // MySQL single nodes do not recognise the "wsrep_sync_wait" system variable, but MariaDB does! {name: "wsrep_sync_wait", value: "15"}, // MySQL unknown sys var | MariaDB succeeds {name: "wsrep_sync_wait", value: "7"}, // MySQL unknown sys var | MariaDB succeeds // Just some random unknown system variables :-) {name: "Icinga", value: "Icinga"}, // unknown sys var {name: "IcingaDB", value: "IcingaDB"}, // unknown sys var }, "VariablesWithCorrectValue": { // Setting system variables known by MySQL/MariaDB to a valid value {name: "autocommit", value: "true"}, {name: "binlog_format", value: "MIXED"}, {name: "completion_type", value: "1" /** CHAIN */}, {name: "completion_type", value: "CHAIN"}, {name: "default_storage_engine", value: "InnoDB"}, }, "VariablesWithInvalidValues": { // System variables set to an invalid value {name: "autocommit", value: "SOMETHING", expect: &mysql.MySQLError{Number: 1231}}, {name: "binlog_format", value: "IcingaDB", expect: &mysql.MySQLError{Number: 1231}}, // Invalid val! {name: "completion_type", value: "-10", expect: &mysql.MySQLError{Number: 1231}}, // Min valid val 0 {name: "default_storage_engine", value: "IcingaDB", expect: &mysql.MySQLError{Number: 1286}}, // Unknown storage Engine! }, } for name, vs := range vars { t.Run(name, func(t *testing.T) { t.Parallel() for _, v := range vs { conn, err := db.Conn(ctx) require.NoError(t, err, "connecting to MySQL/MariaDB database should not fail") err = conn.Raw(func(conn any) error { c, ok := conn.(driver.Conn) if !ok { return fmt.Errorf("conn is not a driver.Conn") } return unsafeSetSessionVariableIfExists(ctx, c, v.name, v.value) }) assert.ErrorIsf(t, err, v.expect, "setting %q variable to '%v' returns unexpected result", v.name, v.value) assert.NoError(t, conn.Close(), "closing MySQL/MariaDB connection should not fail") } }) } } // GetTestDB retrieves the database config from env variables, opens a new database and returns it. // The [envPrefix] argument defines the environment variables prefix to look for e.g. `ICINGAGOLIBRARY`. // // The test suite will be skipped if no `envPrefix+"_TESTS_DB_TYPE" environment variable is // set, otherwise fails fatally when invalid configurations are specified. 
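// Typical usage, mirroring TestDatabaseUtils above:
//
//	ctx := context.Background()
//	db := GetTestDB(ctx, t, "ICINGAGOLIBRARY")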
func GetTestDB(ctx context.Context, t *testing.T, envPrefix string) *DB { c := &Config{} require.NoError(t, defaults.Set(c), "applying config default should not fail") if v, ok := os.LookupEnv(envPrefix + "_TESTS_DB_TYPE"); ok { c.Type = strings.ToLower(v) } else { t.Skipf("Environment %q not set, skipping test!", envPrefix+"_TESTS_DB_TYPE") } if v, ok := os.LookupEnv(envPrefix + "_TESTS_DB"); ok { c.Database = v } if v, ok := os.LookupEnv(envPrefix + "_TESTS_DB_USER"); ok { c.User = v } if v, ok := os.LookupEnv(envPrefix + "_TESTS_DB_PASSWORD"); ok { c.Password = v } if v, ok := os.LookupEnv(envPrefix + "_TESTS_DB_HOST"); ok { c.Host = v } if v, ok := os.LookupEnv(envPrefix + "_TESTS_DB_PORT"); ok { port, err := strconv.Atoi(v) require.NoError(t, err, "invalid port provided") c.Port = port } require.NoError(t, c.Validate(), "database config validation should not fail") db, err := NewDbFromConfig(c, logging.NewLogger(zaptest.NewLogger(t).Sugar(), time.Hour), RetryConnectorCallbacks{}) require.NoError(t, err, "connecting to database should not fail") require.NoError(t, db.PingContext(ctx), "pinging the database should not fail") return db } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/database/mysql_split_test.go0000644000000000000000000000271315024302466030724 0ustar rootrootpackage database import ( "github.com/stretchr/testify/assert" "testing" ) func TestMysqlSplitQueries(t *testing.T) { tests := []struct { name string input string want []string }{{ name: "empty", input: "", want: nil, }, { name: "default_delimiter", input: "q1;\nq2;\nq3;\n", want: []string{"q1", "q2", "q3"}, }, { name: "delimiter_at_eof", input: "q1;", want: []string{"q1"}, }, { name: "delimiter_switch", input: "q1;\ndelimiter //\nq2//\ndelimiter ;\nq3;\n", want: []string{"q1", "q2", "q3"}, }, { name: "delimiter_as_column_name", input: "SELECT 1 AS\ndelimiter WHERE\n1=1;\nSELECT 42 WHERE\n1=1", want: []string{"SELECT 1 AS\ndelimiter WHERE\n1=1", "SELECT 42 WHERE\n1=1"}, }, { name: "delimiter_as_value", input: "SELECT ';';\ndelimiter //\nSELECT '//'//", want: []string{"SELECT ';'", "SELECT '//'"}, }, { name: "delimiters_but_no_queries", input: "DELIMITER //\nDELIMITER ;", want: nil, }, { name: "extra_newlines", input: "\n\n\nSELECT 1;\n\n\nDELIMITER //\n\n\nSELECT 42//\n\n\nSELECT 23\n\n\n", want: []string{"SELECT 1", "SELECT 42", "SELECT 23"}, }, { name: "ignore_empty_statements", input: "SELECT 1\n;\n;\nSELECT 2\n;\n;\nSELECT 3\n;\n;\n", want: []string{"SELECT 1", "SELECT 2", "SELECT 3"}, }} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { assert.Equalf(t, tt.want, MysqlSplitStatements(tt.input), "MysqlSplitStatements(%v)", tt.input) }) } } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/database/driver.go0000644000000000000000000000657615024302466026613 0ustar rootrootpackage database import ( "context" "database/sql/driver" "github.com/icinga/icinga-go-library/backoff" "github.com/icinga/icinga-go-library/logging" "github.com/icinga/icinga-go-library/retry" "github.com/pkg/errors" "go.uber.org/zap" "sync/atomic" "time" ) // Driver names as automatically registered in the database/sql package by themselves. const ( MySQL string = "mysql" PostgreSQL string = "postgres" ) // OnInitConnFunc can be used to execute post Connect() arbitrary actions. // It will be called after successfully initiated a new connection using the connector's Connect method. 
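// A minimal sketch of such a callback (the body is illustrative; NewDbFromConfig uses this
// hook to set the MySQL/MariaDB "wsrep_sync_wait" session variable):
//
//	callbacks := RetryConnectorCallbacks{
//		OnInitConn: func(ctx context.Context, conn driver.Conn) error {
//			return nil // e.g. initialize per-session state here
//		},
//	}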
type OnInitConnFunc func(context.Context, driver.Conn) error // RetryConnectorCallbacks specifies callbacks that are executed upon certain events. type RetryConnectorCallbacks struct { OnInitConn OnInitConnFunc OnRetryableError retry.OnRetryableErrorFunc OnSuccess retry.OnSuccessFunc } // RetryConnector wraps driver.Connector with retry logic. // // The first connection attempt will be retried for [retry.DefaultTimeout]. After a prior successful connection, // reconnection attempts are made infinitely. type RetryConnector struct { driver.Connector logger *logging.Logger callbacks RetryConnectorCallbacks hadConnection atomic.Bool } // NewConnector creates a fully initialized RetryConnector from the given args. func NewConnector(c driver.Connector, logger *logging.Logger, callbacks RetryConnectorCallbacks) *RetryConnector { return &RetryConnector{Connector: c, logger: logger, callbacks: callbacks} } // Connect implements part of the driver.Connector interface. func (c *RetryConnector) Connect(ctx context.Context) (driver.Conn, error) { retryTimeout := retry.DefaultTimeout if c.hadConnection.Load() { retryTimeout = 0 } var conn driver.Conn err := errors.Wrap(retry.WithBackoff( ctx, func(ctx context.Context) (err error) { conn, err = c.Connector.Connect(ctx) if err == nil && c.callbacks.OnInitConn != nil { if err = c.callbacks.OnInitConn(ctx, conn); err != nil { // We're going to retry this, so just don't bother whether Close() fails! _ = conn.Close() } } return }, retry.Retryable, backoff.DefaultBackoff, retry.Settings{ Timeout: retryTimeout, OnRetryableError: func(elapsed time.Duration, attempt uint64, err, lastErr error) { if c.callbacks.OnRetryableError != nil { c.callbacks.OnRetryableError(elapsed, attempt, err, lastErr) } c.logger.Warnw("Can't connect to database. Retrying", zap.Error(err), zap.Duration("after", elapsed), zap.Uint64("attempt", attempt)) }, OnSuccess: func(elapsed time.Duration, attempt uint64, lastErr error) { c.hadConnection.Store(true) if c.callbacks.OnSuccess != nil { c.callbacks.OnSuccess(elapsed, attempt, lastErr) } if attempt > 1 { c.logger.Infow("Reconnected to database", zap.Duration("after", elapsed), zap.Uint64("attempts", attempt)) } }, }, ), "can't connect to database") return conn, err } // Driver implements part of the driver.Connector interface. func (c *RetryConnector) Driver() driver.Driver { return c.Connector.Driver() } // MysqlFuncLogger is an adapter that allows ordinary functions to be used as a logger for mysql.SetLogger. type MysqlFuncLogger func(v ...interface{}) // Print implements the mysql.Logger interface. func (log MysqlFuncLogger) Print(v ...interface{}) { log(v) } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/database/mysql_split.go0000644000000000000000000000447415024302466027673 0ustar rootrootpackage database import ( "regexp" "strings" ) var delimiterCommandRe = regexp.MustCompile(`(?im)\A\s*delimiter\s*(\S+)\s*$`) // MysqlSplitStatements takes a string containing multiple SQL statements and splits them into individual statements // with limited support for the DELIMITER keyword like implemented by the mysql command line client. // // The main purpose of this function is to allow importing a schema file containing stored functions from Go. Such files // have to specify an alternative delimiter internally if the function has semicolons in its body, otherwise the mysql // command line clients splits the CREATE FUNCTION statement somewhere in the middle. 
This delimiter handling is not // supported by the MySQL server, so when trying to import such a schema file using a different method than the mysql // command line client, the delimiter handling has to be reimplemented. This is what this function does. // // To avoid an overly complex implementation, this function has some limitations on its input: // - Specifying a delimiter using a quoted string is NOT supported. // - Statements are only split if the delimiter appears at the end of a line. This in done in order to avoid // accidentally splitting in the middle of string literals and comments. // - The function does not attempt to handle comments in any way, so there must not be a delimiter at the end of a line // within a comment. // - The delimiter command is only recognized at the beginning of the file or immediately following a delimiter at the // end of a previous line, there must not be a comment in between, empty lines are fine. func MysqlSplitStatements(statements string) []string { delimiterRe := makeDelimiterRe(";") var result []string for len(statements) > 0 { if match := delimiterCommandRe.FindStringSubmatch(statements); match != nil { delimiterRe = makeDelimiterRe(match[1]) statements = statements[len(match[0]):] continue } split := delimiterRe.Split(statements, 2) if statement := strings.TrimSpace(split[0]); len(statement) > 0 { result = append(result, statement) } if len(split) > 1 { statements = split[1] } else { statements = "" } } return result } func makeDelimiterRe(delimiter string) *regexp.Regexp { return regexp.MustCompile(`(?m)` + regexp.QuoteMeta(delimiter) + `$`) } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/database/config_test.go0000644000000000000000000003250115024302466027607 0ustar rootrootpackage database import ( "github.com/creasty/defaults" "github.com/icinga/icinga-go-library/config" "github.com/icinga/icinga-go-library/testutils" "github.com/stretchr/testify/require" "os" "testing" ) // minimalYaml is a constant string representing a minimal valid YAML configuration for // connecting to a PostgreSQL database. PostgreSQL is explicitly chosen here to // test whether the default type (which is MySQL) is correctly overridden. const minimalYaml = ` type: pgsql host: localhost user: icinga database: icingadb password: secret` // minimalEnv returns a map of environment variables representing a minimal valid configuration for // connecting to a PostgreSQL database. PostgreSQL is explicitly chosen here to // test whether the default type (which is MySQL) is correctly overridden. func minimalEnv() map[string]string { return map[string]string{ "TYPE": "pgsql", "HOST": "localhost", "USER": "icinga", "DATABASE": "icingadb", "PASSWORD": "secret", } } // withMinimalEnv takes a map of environment variables and merges it with the // minimal environment configuration returned from minimalEnv, // overriding any existing keys with the provided values. // It returns the resulting map. 
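// For example, withMinimalEnv(map[string]string{"TLS": "1"}) returns the minimal
// environment plus TLS=1, as used by the TLS test cases below.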
func withMinimalEnv(v map[string]string) map[string]string { env := minimalEnv() for key, value := range v { env[key] = value } return env } func TestConfig(t *testing.T) { var defaultOptions Options require.NoError(t, defaults.Set(&defaultOptions), "setting default options") configTests := []testutils.TestCase[Config, testutils.ConfigTestData]{ { Name: "Unknown database type", Data: testutils.ConfigTestData{ Yaml: `type: invalid`, Env: map[string]string{"TYPE": "invalid"}, }, Error: testutils.ErrorContains(`unknown database type "invalid"`), }, { Name: "Database host missing", Data: testutils.ConfigTestData{ Yaml: `type: pgsql`, Env: map[string]string{"TYPE": "pgsql"}, }, Error: testutils.ErrorContains("database host missing"), }, { Name: "Database user missing", Data: testutils.ConfigTestData{ Yaml: ` type: pgsql host: localhost`, Env: map[string]string{ "TYPE": "pgsql", "HOST": "localhost", }, }, Error: testutils.ErrorContains("database user missing"), }, { Name: "Database name missing", Data: testutils.ConfigTestData{ Yaml: ` type: pgsql host: localhost user: icinga`, Env: map[string]string{ "TYPE": "pgsql", "HOST": "localhost", "USER": "icinga", }, }, Error: testutils.ErrorContains("database name missing"), }, { Name: "Minimal config", Data: testutils.ConfigTestData{ Yaml: minimalYaml, Env: minimalEnv(), }, Expected: Config{ Type: "pgsql", Host: "localhost", User: "icinga", Database: "icingadb", Password: "secret", Options: defaultOptions, }, }, { Name: "Retain defaults", Data: testutils.ConfigTestData{ Yaml: ` host: localhost user: icinga database: icinga`, Env: map[string]string{ "HOST": "localhost", "USER": "icinga", "DATABASE": "icinga", }, }, Expected: Config{ Type: "mysql", // Default Host: "localhost", User: "icinga", Database: "icinga", Options: defaultOptions, }, }, { Name: "TLS", Data: testutils.ConfigTestData{ Yaml: minimalYaml + ` tls: true cert: cert.pem key: key.pem ca: ca.pem`, Env: withMinimalEnv(map[string]string{ "TLS": "1", "CERT": "cert.pem", "KEY": "key.pem", "CA": "ca.pem", }), }, Expected: Config{ Type: "pgsql", Host: "localhost", User: "icinga", Database: "icingadb", Password: "secret", Options: defaultOptions, TlsOptions: config.TLS{ Enable: true, Cert: "cert.pem", Key: "key.pem", Ca: "ca.pem", }, }, }, { Name: "TLS with raw PEM", Data: testutils.ConfigTestData{ Yaml: minimalYaml + ` tls: true cert: |- -----BEGIN CERTIFICATE----- MIIBhTCCASugAwIBAgIQIRi6zePL6mKjOipn+dNuaTAKBggqhkjOPQQDAjASMRAw DgYDVQQKEwdBY21lIENvMB4XDTE3MTAyMDE5NDMwNloXDTE4MTAyMDE5NDMwNlow EjEQMA4GA1UEChMHQWNtZSBDbzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABD0d 7VNhbWvZLWPuj/RtHFjvtJBEwOkhbN/BnnE8rnZR8+sbwnc/KhCk3FhnpHZnQz7B 5aETbbIgmuvewdjvSBSjYzBhMA4GA1UdDwEB/wQEAwICpDATBgNVHSUEDDAKBggr BgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MCkGA1UdEQQiMCCCDmxvY2FsaG9zdDo1 NDUzgg4xMjcuMC4wLjE6NTQ1MzAKBggqhkjOPQQDAgNIADBFAiEA2zpJEPQyz6/l Wf86aX6PepsntZv2GYlA5UpabfT2EZICICpJ5h/iI+i341gBmLiAFQOyTDT+/wQc 6MF9+Yw1Yy0t -----END CERTIFICATE----- key: |- -----BEGIN EC PRIVATE KEY----- MHcCAQEEIIrYSSNQFaA2Hwf1duRSxKtLYX5CB04fSeQ6tF1aY/PuoAoGCCqGSM49 AwEHoUQDQgAEPR3tU2Fta9ktY+6P9G0cWO+0kETA6SFs38GecTyudlHz6xvCdz8q EKTcWGekdmdDPsHloRNtsiCa697B2O9IFA== -----END EC PRIVATE KEY----- ca: |- -----BEGIN CERTIFICATE----- MIICSTCCAfOgAwIBAgIUcmQfIJAvbxdVm0PFanS4FWH71Z0wDQYJKoZIhvcNAQEL BQAweTELMAkGA1UEBhMCREUxEjAQBgNVBAgMCUZyYW5jb25pYTESMBAGA1UEBwwJ TnVyZW1iZXJnMUIwQAYDVQQKDDlIb25lc3QgTWFya3VzJyBVc2VkIE51Y2xlYXIg UG93ZXIgUGxhbnRzIGFuZCBDZXJ0aWZpY2F0ZXMwHhcNMjUwMzA1MDk0ODIwWhcN 
MjUwMzA2MDk0ODIwWjB5MQswCQYDVQQGEwJERTESMBAGA1UECAwJRnJhbmNvbmlh MRIwEAYDVQQHDAlOdXJlbWJlcmcxQjBABgNVBAoMOUhvbmVzdCBNYXJrdXMnIFVz ZWQgTnVjbGVhciBQb3dlciBQbGFudHMgYW5kIENlcnRpZmljYXRlczBcMA0GCSqG SIb3DQEBAQUAA0sAMEgCQQCeEGX2IolvELSUjC1DqvJRbTs4DKwE8ZZHDAGrc5K9 DFrLKvkwgfv3g9R2NJE5o/A5vBLq22IDCFdI26M6t10HAgMBAAGjUzBRMB0GA1Ud DgQWBBQn+dCzVtAzYOGC8tIi9JLmRbWI7jAfBgNVHSMEGDAWgBQn+dCzVtAzYOGC 8tIi9JLmRbWI7jAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA0EAlA27 ti1NKC+o+iZtyU8I/32aPaFme1+eQNIxvqXfw49jSM/FyDjhfZ0XlAxmK6tzF3mM LJZsYbxapLeyWoA05Q== -----END CERTIFICATE----- `, Env: withMinimalEnv(map[string]string{ "TLS": "1", "CERT": `-----BEGIN CERTIFICATE----- MIIBhTCCASugAwIBAgIQIRi6zePL6mKjOipn+dNuaTAKBggqhkjOPQQDAjASMRAw DgYDVQQKEwdBY21lIENvMB4XDTE3MTAyMDE5NDMwNloXDTE4MTAyMDE5NDMwNlow EjEQMA4GA1UEChMHQWNtZSBDbzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABD0d 7VNhbWvZLWPuj/RtHFjvtJBEwOkhbN/BnnE8rnZR8+sbwnc/KhCk3FhnpHZnQz7B 5aETbbIgmuvewdjvSBSjYzBhMA4GA1UdDwEB/wQEAwICpDATBgNVHSUEDDAKBggr BgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MCkGA1UdEQQiMCCCDmxvY2FsaG9zdDo1 NDUzgg4xMjcuMC4wLjE6NTQ1MzAKBggqhkjOPQQDAgNIADBFAiEA2zpJEPQyz6/l Wf86aX6PepsntZv2GYlA5UpabfT2EZICICpJ5h/iI+i341gBmLiAFQOyTDT+/wQc 6MF9+Yw1Yy0t -----END CERTIFICATE-----`, "KEY": `-----BEGIN EC PRIVATE KEY----- MHcCAQEEIIrYSSNQFaA2Hwf1duRSxKtLYX5CB04fSeQ6tF1aY/PuoAoGCCqGSM49 AwEHoUQDQgAEPR3tU2Fta9ktY+6P9G0cWO+0kETA6SFs38GecTyudlHz6xvCdz8q EKTcWGekdmdDPsHloRNtsiCa697B2O9IFA== -----END EC PRIVATE KEY-----`, "CA": `-----BEGIN CERTIFICATE----- MIICSTCCAfOgAwIBAgIUcmQfIJAvbxdVm0PFanS4FWH71Z0wDQYJKoZIhvcNAQEL BQAweTELMAkGA1UEBhMCREUxEjAQBgNVBAgMCUZyYW5jb25pYTESMBAGA1UEBwwJ TnVyZW1iZXJnMUIwQAYDVQQKDDlIb25lc3QgTWFya3VzJyBVc2VkIE51Y2xlYXIg UG93ZXIgUGxhbnRzIGFuZCBDZXJ0aWZpY2F0ZXMwHhcNMjUwMzA1MDk0ODIwWhcN MjUwMzA2MDk0ODIwWjB5MQswCQYDVQQGEwJERTESMBAGA1UECAwJRnJhbmNvbmlh MRIwEAYDVQQHDAlOdXJlbWJlcmcxQjBABgNVBAoMOUhvbmVzdCBNYXJrdXMnIFVz ZWQgTnVjbGVhciBQb3dlciBQbGFudHMgYW5kIENlcnRpZmljYXRlczBcMA0GCSqG SIb3DQEBAQUAA0sAMEgCQQCeEGX2IolvELSUjC1DqvJRbTs4DKwE8ZZHDAGrc5K9 DFrLKvkwgfv3g9R2NJE5o/A5vBLq22IDCFdI26M6t10HAgMBAAGjUzBRMB0GA1Ud DgQWBBQn+dCzVtAzYOGC8tIi9JLmRbWI7jAfBgNVHSMEGDAWgBQn+dCzVtAzYOGC 8tIi9JLmRbWI7jAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA0EAlA27 ti1NKC+o+iZtyU8I/32aPaFme1+eQNIxvqXfw49jSM/FyDjhfZ0XlAxmK6tzF3mM LJZsYbxapLeyWoA05Q== -----END CERTIFICATE-----`, }), }, Expected: Config{ Type: "pgsql", Host: "localhost", User: "icinga", Database: "icingadb", Password: "secret", Options: defaultOptions, TlsOptions: config.TLS{ Enable: true, Cert: `-----BEGIN CERTIFICATE----- MIIBhTCCASugAwIBAgIQIRi6zePL6mKjOipn+dNuaTAKBggqhkjOPQQDAjASMRAw DgYDVQQKEwdBY21lIENvMB4XDTE3MTAyMDE5NDMwNloXDTE4MTAyMDE5NDMwNlow EjEQMA4GA1UEChMHQWNtZSBDbzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABD0d 7VNhbWvZLWPuj/RtHFjvtJBEwOkhbN/BnnE8rnZR8+sbwnc/KhCk3FhnpHZnQz7B 5aETbbIgmuvewdjvSBSjYzBhMA4GA1UdDwEB/wQEAwICpDATBgNVHSUEDDAKBggr BgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MCkGA1UdEQQiMCCCDmxvY2FsaG9zdDo1 NDUzgg4xMjcuMC4wLjE6NTQ1MzAKBggqhkjOPQQDAgNIADBFAiEA2zpJEPQyz6/l Wf86aX6PepsntZv2GYlA5UpabfT2EZICICpJ5h/iI+i341gBmLiAFQOyTDT+/wQc 6MF9+Yw1Yy0t -----END CERTIFICATE-----`, Key: `-----BEGIN EC PRIVATE KEY----- MHcCAQEEIIrYSSNQFaA2Hwf1duRSxKtLYX5CB04fSeQ6tF1aY/PuoAoGCCqGSM49 AwEHoUQDQgAEPR3tU2Fta9ktY+6P9G0cWO+0kETA6SFs38GecTyudlHz6xvCdz8q EKTcWGekdmdDPsHloRNtsiCa697B2O9IFA== -----END EC PRIVATE KEY-----`, Ca: `-----BEGIN CERTIFICATE----- MIICSTCCAfOgAwIBAgIUcmQfIJAvbxdVm0PFanS4FWH71Z0wDQYJKoZIhvcNAQEL BQAweTELMAkGA1UEBhMCREUxEjAQBgNVBAgMCUZyYW5jb25pYTESMBAGA1UEBwwJ 
TnVyZW1iZXJnMUIwQAYDVQQKDDlIb25lc3QgTWFya3VzJyBVc2VkIE51Y2xlYXIg UG93ZXIgUGxhbnRzIGFuZCBDZXJ0aWZpY2F0ZXMwHhcNMjUwMzA1MDk0ODIwWhcN MjUwMzA2MDk0ODIwWjB5MQswCQYDVQQGEwJERTESMBAGA1UECAwJRnJhbmNvbmlh MRIwEAYDVQQHDAlOdXJlbWJlcmcxQjBABgNVBAoMOUhvbmVzdCBNYXJrdXMnIFVz ZWQgTnVjbGVhciBQb3dlciBQbGFudHMgYW5kIENlcnRpZmljYXRlczBcMA0GCSqG SIb3DQEBAQUAA0sAMEgCQQCeEGX2IolvELSUjC1DqvJRbTs4DKwE8ZZHDAGrc5K9 DFrLKvkwgfv3g9R2NJE5o/A5vBLq22IDCFdI26M6t10HAgMBAAGjUzBRMB0GA1Ud DgQWBBQn+dCzVtAzYOGC8tIi9JLmRbWI7jAfBgNVHSMEGDAWgBQn+dCzVtAzYOGC 8tIi9JLmRbWI7jAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA0EAlA27 ti1NKC+o+iZtyU8I/32aPaFme1+eQNIxvqXfw49jSM/FyDjhfZ0XlAxmK6tzF3mM LJZsYbxapLeyWoA05Q== -----END CERTIFICATE-----`, }, }, }, { Name: "max_connections cannot be 0", Data: testutils.ConfigTestData{ Yaml: minimalYaml + ` options: max_connections: 0`, Env: withMinimalEnv(map[string]string{"OPTIONS_MAX_CONNECTIONS": "0"}), }, Error: testutils.ErrorContains("max_connections cannot be 0"), }, { Name: "max_connections_per_table must be at least 1", Data: testutils.ConfigTestData{ Yaml: minimalYaml + ` options: max_connections_per_table: 0`, Env: withMinimalEnv(map[string]string{"OPTIONS_MAX_CONNECTIONS_PER_TABLE": "0"}), }, Error: testutils.ErrorContains("max_connections_per_table must be at least 1"), }, { Name: "max_placeholders_per_statement must be at least 1", Data: testutils.ConfigTestData{ Yaml: minimalYaml + ` options: max_placeholders_per_statement: 0`, Env: withMinimalEnv(map[string]string{"OPTIONS_MAX_PLACEHOLDERS_PER_STATEMENT": "0"}), }, Error: testutils.ErrorContains("max_placeholders_per_statement must be at least 1"), }, { Name: "max_rows_per_transaction must be at least 1", Data: testutils.ConfigTestData{ Yaml: minimalYaml + ` options: max_rows_per_transaction: 0`, Env: withMinimalEnv(map[string]string{"OPTIONS_MAX_ROWS_PER_TRANSACTION": "0"}), }, Error: testutils.ErrorContains("max_rows_per_transaction must be at least 1"), }, { Name: "wsrep_sync_wait can only be set to a number between 0 and 15", Data: testutils.ConfigTestData{ Yaml: minimalYaml + ` options: wsrep_sync_wait: 16`, Env: withMinimalEnv(map[string]string{"OPTIONS_WSREP_SYNC_WAIT": "16"}), }, Error: testutils.ErrorContains("wsrep_sync_wait can only be set to a number between 0 and 15"), }, { Name: "Options retain defaults", Data: testutils.ConfigTestData{ Yaml: minimalYaml + ` options: max_connections: 8 max_connections_per_table: 4`, Env: withMinimalEnv(map[string]string{ "OPTIONS_MAX_CONNECTIONS": "8", "OPTIONS_MAX_CONNECTIONS_PER_TABLE": "4", }), }, Expected: Config{ Type: "pgsql", Host: "localhost", User: "icinga", Database: "icingadb", Password: "secret", Options: Options{ MaxConnections: 8, MaxConnectionsPerTable: 4, MaxPlaceholdersPerStatement: defaultOptions.MaxPlaceholdersPerStatement, MaxRowsPerTransaction: defaultOptions.MaxRowsPerTransaction, WsrepSyncWait: defaultOptions.WsrepSyncWait, }, }, }, { Name: "Options", Data: testutils.ConfigTestData{ Yaml: minimalYaml + ` options: max_connections: 8 max_connections_per_table: 4 max_placeholders_per_statement: 4096 max_rows_per_transaction: 2048 wsrep_sync_wait: 15`, Env: withMinimalEnv(map[string]string{ "OPTIONS_MAX_CONNECTIONS": "8", "OPTIONS_MAX_CONNECTIONS_PER_TABLE": "4", "OPTIONS_MAX_PLACEHOLDERS_PER_STATEMENT": "4096", "OPTIONS_MAX_ROWS_PER_TRANSACTION": "2048", "OPTIONS_WSREP_SYNC_WAIT": "15", }), }, Expected: Config{ Type: "pgsql", Host: "localhost", User: "icinga", Database: "icingadb", Password: "secret", Options: Options{ MaxConnections: 8, MaxConnectionsPerTable: 4, 
MaxPlaceholdersPerStatement: 4096, MaxRowsPerTransaction: 2048, WsrepSyncWait: 15, }, }, }, } t.Run("FromEnv", func(t *testing.T) { for _, tc := range configTests { t.Run(tc.Name, tc.F(func(data testutils.ConfigTestData) (Config, error) { var actual Config err := config.FromEnv(&actual, config.EnvOptions{Environment: data.Env}) return actual, err })) } }) t.Run("FromYAMLFile", func(t *testing.T) { for _, tc := range configTests { t.Run(tc.Name+"/FromYAMLFile", tc.F(func(data testutils.ConfigTestData) (Config, error) { var actual Config var err error testutils.WithYAMLFile(t, data.Yaml, func(file *os.File) { err = config.FromYAMLFile(file.Name(), &actual) }) return actual, err })) } }) } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/database/utils.go0000644000000000000000000000746215024302466026453 0ustar rootrootpackage database import ( "context" "database/sql/driver" "fmt" "github.com/go-sql-driver/mysql" "github.com/icinga/icinga-go-library/com" "github.com/icinga/icinga-go-library/strcase" "github.com/icinga/icinga-go-library/types" "github.com/jmoiron/sqlx" "github.com/pkg/errors" "slices" "strings" ) // CantPerformQuery wraps the given error with the specified query that cannot be executed. func CantPerformQuery(err error, q string) error { return errors.Wrapf(err, "can't perform %q", q) } // TableName returns the table of t. func TableName(t interface{}) string { if tn, ok := t.(TableNamer); ok { return tn.TableName() } else { return strcase.Snake(types.Name(t)) } } // SplitOnDupId returns a state machine which tracks the inputs' IDs. // Once an already seen input arrives, it demands splitting. func SplitOnDupId[T IDer]() com.BulkChunkSplitPolicy[T] { seenIds := map[string]struct{}{} return func(ider T) bool { id := ider.ID().String() _, ok := seenIds[id] if ok { seenIds = map[string]struct{}{id: {}} } else { seenIds[id] = struct{}{} } return ok } } // InsertObtainID executes the given query and fetches the last inserted ID. // // Using this method for database tables that don't define an auto-incrementing ID, or none at all, // will not work. The only supported column that can be retrieved with this method is id. // // This function expects [TxOrDB] as an executor of the provided query, and is usually a *[sqlx.Tx] or *[DB] instance. // // Returns the retrieved ID on success and error on any database inserting/retrieving failure. func InsertObtainID(ctx context.Context, conn TxOrDB, stmt string, arg any) (int64, error) { var resultID int64 switch conn.DriverName() { case PostgreSQL: stmt = stmt + " RETURNING id" query, args, err := conn.BindNamed(stmt, arg) if err != nil { return 0, errors.Wrapf(err, "can't bind named query %q", stmt) } if err := sqlx.GetContext(ctx, conn, &resultID, query, args...); err != nil { return 0, CantPerformQuery(err, query) } default: result, err := sqlx.NamedExecContext(ctx, conn, stmt, arg) if err != nil { return 0, CantPerformQuery(err, stmt) } resultID, err = result.LastInsertId() if err != nil { return 0, errors.Wrap(err, "can't retrieve last inserted ID") } } return resultID, nil } // BuildInsertStmtWithout builds an insert stmt without the provided columns. 
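// For example, for an entity whose columns are id, name and value (illustrative),
// excluding "id" yields
//
//	INSERT INTO "entity" ("name", "value") VALUES (:name, :value)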
func BuildInsertStmtWithout(db *DB, into interface{}, withoutColumns ...string) string { columns := slices.DeleteFunc( db.BuildColumns(into), func(column string) bool { return slices.Contains(withoutColumns, column) }) return fmt.Sprintf( `INSERT INTO "%s" ("%s") VALUES (%s)`, TableName(into), strings.Join(columns, `", "`), fmt.Sprintf(":%s", strings.Join(columns, ", :")), ) } // unsafeSetSessionVariableIfExists sets the given MySQL/MariaDB system variable for the specified database session. // // NOTE: It is unsafe to use this function with untrusted/user supplied inputs and poses an SQL injection, // because it doesn't use a prepared statement, but executes the SQL command directly with the provided inputs. // // When the "SET SESSION" command fails with "Unknown system variable (1193)", the error will be silently // dropped but returns all other database errors. func unsafeSetSessionVariableIfExists(ctx context.Context, conn driver.Conn, variable, value string) error { stmt := fmt.Sprintf("SET SESSION %s=%s", variable, value) exe, ok := conn.(driver.ExecerContext) if !ok { return fmt.Errorf("conn is not a driver.ExecerContext") } if _, err := exe.ExecContext(ctx, stmt, nil); err != nil { if errors.Is(err, &mysql.MySQLError{Number: 1193}) { // Unknown system variable return nil } return CantPerformQuery(err, stmt) } return nil } var ( _ com.BulkChunkSplitPolicyFactory[Entity] = SplitOnDupId[Entity] ) dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/database/db.go0000644000000000000000000007110215024302466025670 0ustar rootrootpackage database import ( "context" "database/sql" "database/sql/driver" "fmt" "github.com/go-sql-driver/mysql" "github.com/icinga/icinga-go-library/backoff" "github.com/icinga/icinga-go-library/com" "github.com/icinga/icinga-go-library/logging" "github.com/icinga/icinga-go-library/periodic" "github.com/icinga/icinga-go-library/retry" "github.com/icinga/icinga-go-library/strcase" "github.com/icinga/icinga-go-library/utils" "github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx/reflectx" "github.com/lib/pq" "github.com/pkg/errors" "go.uber.org/zap" "go.uber.org/zap/zapcore" "golang.org/x/sync/errgroup" "golang.org/x/sync/semaphore" "net" "net/url" "slices" "strconv" "strings" "sync" "time" ) // DB is a wrapper around sqlx.DB with bulk execution, // statement building, streaming and logging capabilities. type DB struct { *sqlx.DB Options *Options addr string columnMap ColumnMap logger *logging.Logger tableSemaphores map[string]*semaphore.Weighted tableSemaphoresMu sync.Mutex } // Options define user configurable database options. type Options struct { // Maximum number of open connections to the database. MaxConnections int `yaml:"max_connections" env:"MAX_CONNECTIONS" default:"16"` // Maximum number of connections per table, // regardless of what the connection is actually doing, // e.g. INSERT, UPDATE, DELETE. MaxConnectionsPerTable int `yaml:"max_connections_per_table" env:"MAX_CONNECTIONS_PER_TABLE" default:"8"` // MaxPlaceholdersPerStatement defines the maximum number of placeholders in an // INSERT, UPDATE or DELETE statement. Theoretically, MySQL can handle up to 2^16-1 placeholders, // but this increases the execution time of queries and thus reduces the number of queries // that can be executed in parallel in a given time. // The default is 2^13, which in our tests showed the best performance in terms of execution time and parallelism. 
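// With the default of 8192 placeholders, an INSERT statement with 8 columns is therefore
// batched into 8192/8 = 1024 rows per statement (see DB.BatchSizeByPlaceholders).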
MaxPlaceholdersPerStatement int `yaml:"max_placeholders_per_statement" env:"MAX_PLACEHOLDERS_PER_STATEMENT" default:"8192"` // MaxRowsPerTransaction defines the maximum number of rows per transaction. // The default is 2^13, which in our tests showed the best performance in terms of execution time and parallelism. MaxRowsPerTransaction int `yaml:"max_rows_per_transaction" env:"MAX_ROWS_PER_TRANSACTION" default:"8192"` // WsrepSyncWait enforces Galera cluster nodes to perform strict cluster-wide causality checks // before executing specific SQL queries determined by the number you provided. // Please refer to the below link for a detailed description. // https://icinga.com/docs/icinga-db/latest/doc/03-Configuration/#galera-cluster WsrepSyncWait int `yaml:"wsrep_sync_wait" env:"WSREP_SYNC_WAIT" default:"7"` } // Validate checks constraints in the supplied database options and returns an error if they are violated. func (o *Options) Validate() error { if o.MaxConnections == 0 { return errors.New("max_connections cannot be 0. Configure a value greater than zero, or use -1 for no connection limit") } if o.MaxConnectionsPerTable < 1 { return errors.New("max_connections_per_table must be at least 1") } if o.MaxPlaceholdersPerStatement < 1 { return errors.New("max_placeholders_per_statement must be at least 1") } if o.MaxRowsPerTransaction < 1 { return errors.New("max_rows_per_transaction must be at least 1") } if o.WsrepSyncWait < 0 || o.WsrepSyncWait > 15 { return errors.New("wsrep_sync_wait can only be set to a number between 0 and 15") } return nil } // NewDbFromConfig returns a new DB from Config. func NewDbFromConfig(c *Config, logger *logging.Logger, connectorCallbacks RetryConnectorCallbacks) (*DB, error) { var addr string var db *sqlx.DB switch c.Type { case "mysql": config := mysql.NewConfig() config.User = c.User config.Passwd = c.Password config.Logger = MysqlFuncLogger(logger.Debug) if utils.IsUnixAddr(c.Host) { config.Net = "unix" config.Addr = c.Host addr = "(" + config.Addr + ")" } else { config.Net = "tcp" port := c.Port if port == 0 { port = 3306 } config.Addr = net.JoinHostPort(c.Host, fmt.Sprint(port)) addr = config.Addr } config.DBName = c.Database config.Timeout = time.Minute config.Params = map[string]string{"sql_mode": "'TRADITIONAL,ANSI_QUOTES'"} tlsConfig, err := c.TlsOptions.MakeConfig(c.Host) if err != nil { return nil, err } config.TLS = tlsConfig connector, err := mysql.NewConnector(config) if err != nil { return nil, errors.Wrap(err, "can't open mysql database") } onInitConn := connectorCallbacks.OnInitConn connectorCallbacks.OnInitConn = func(ctx context.Context, conn driver.Conn) error { if onInitConn != nil { if err := onInitConn(ctx, conn); err != nil { return err } } // Set the "wsrep_sync_wait" variable for each session and ensures that causality checks are performed // before execution and that each statement is executed on a fully synchronized node. Doing so prevents // foreign key violation when inserting into dependent tables on different MariaDB/MySQL nodes. When using // MySQL single nodes, the "SET SESSION" command will fail with "Unknown system variable (1193)" and will // therefore be silently dropped. 
// https://mariadb.com/kb/en/galera-cluster-system-variables/#wsrep_sync_wait return unsafeSetSessionVariableIfExists(ctx, conn, "wsrep_sync_wait", fmt.Sprint(c.Options.WsrepSyncWait)) } db = sqlx.NewDb(sql.OpenDB(NewConnector(connector, logger, connectorCallbacks)), MySQL) case "pgsql": uri := &url.URL{ Scheme: "postgres", User: url.UserPassword(c.User, c.Password), Path: "/" + url.PathEscape(c.Database), } query := url.Values{ "connect_timeout": {"60"}, "binary_parameters": {"yes"}, // Host and port can alternatively be specified in the query string. lib/pq can't parse the connection URI // if a Unix domain socket path is specified in the host part of the URI, therefore always use the query // string. See also https://github.com/lib/pq/issues/796 "host": {c.Host}, } port := c.Port if port == 0 { port = 5432 } query.Set("port", strconv.FormatInt(int64(port), 10)) if _, err := c.TlsOptions.MakeConfig(c.Host); err != nil { return nil, err } if c.TlsOptions.Enable { if c.TlsOptions.Insecure { query.Set("sslmode", "require") } else { query.Set("sslmode", "verify-full") } if c.TlsOptions.Cert != "" { query.Set("sslcert", c.TlsOptions.Cert) } if c.TlsOptions.Key != "" { query.Set("sslkey", c.TlsOptions.Key) } if c.TlsOptions.Ca != "" { query.Set("sslrootcert", c.TlsOptions.Ca) } } else { query.Set("sslmode", "disable") } uri.RawQuery = query.Encode() connector, err := pq.NewConnector(uri.String()) if err != nil { return nil, errors.Wrap(err, "can't open pgsql database") } if utils.IsUnixAddr(c.Host) { // https://www.postgresql.org/docs/17/runtime-config-connection.html#GUC-UNIX-SOCKET-DIRECTORIES addr = fmt.Sprintf("(%s/.s.PGSQL.%d)", strings.TrimRight(c.Host, "/"), port) } else { addr = utils.JoinHostPort(c.Host, port) } db = sqlx.NewDb(sql.OpenDB(NewConnector(connector, logger, connectorCallbacks)), PostgreSQL) default: return nil, unknownDbType(c.Type) } if c.TlsOptions.Enable { addr = fmt.Sprintf("%s+tls://%s@%s/%s", c.Type, c.User, addr, c.Database) } else { addr = fmt.Sprintf("%s://%s@%s/%s", c.Type, c.User, addr, c.Database) } db.SetMaxIdleConns(c.Options.MaxConnections / 3) db.SetMaxOpenConns(c.Options.MaxConnections) db.Mapper = reflectx.NewMapperFunc("db", strcase.Snake) return &DB{ DB: db, Options: &c.Options, columnMap: NewColumnMap(db.Mapper), addr: addr, logger: logger, tableSemaphores: make(map[string]*semaphore.Weighted), }, nil } // GetAddr returns a URI-like database connection string. // // It has the following syntax: // // type[+tls]://user@host[:port]/database func (db *DB) GetAddr() string { return db.addr } // MarshalLogObject implements [zapcore.ObjectMarshaler], adding the database address [DB.GetAddr] to each log message. func (db *DB) MarshalLogObject(encoder zapcore.ObjectEncoder) error { encoder.AddString("database_address", db.GetAddr()) return nil } // BuildColumns returns all columns of the given struct. func (db *DB) BuildColumns(subject interface{}) []string { return slices.Clone(db.columnMap.Columns(subject)) } // BuildDeleteStmt returns a DELETE statement for the given struct. func (db *DB) BuildDeleteStmt(from interface{}) string { return fmt.Sprintf( `DELETE FROM "%s" WHERE id IN (?)`, TableName(from), ) } // BuildInsertStmt returns an INSERT INTO statement for the given struct. 
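// For an entity with the columns id and name (illustrative), this returns
//
//	INSERT INTO "entity" ("id", "name") VALUES (:id, :name)
//
// together with the placeholder count 2.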
func (db *DB) BuildInsertStmt(into interface{}) (string, int) { columns := db.columnMap.Columns(into) return fmt.Sprintf( `INSERT INTO "%s" ("%s") VALUES (%s)`, TableName(into), strings.Join(columns, `", "`), fmt.Sprintf(":%s", strings.Join(columns, ", :")), ), len(columns) } // BuildInsertIgnoreStmt returns an INSERT statement for the specified struct for // which the database ignores rows that have already been inserted. func (db *DB) BuildInsertIgnoreStmt(into interface{}) (string, int) { table := TableName(into) columns := db.columnMap.Columns(into) var clause string switch db.DriverName() { case MySQL: // MySQL treats UPDATE id = id as a no-op. clause = fmt.Sprintf(`ON DUPLICATE KEY UPDATE "%s" = "%s"`, columns[0], columns[0]) case PostgreSQL: var constraint string if constrainter, ok := into.(PgsqlOnConflictConstrainter); ok { constraint = constrainter.PgsqlOnConflictConstraint() } else { constraint = "pk_" + table } clause = fmt.Sprintf("ON CONFLICT ON CONSTRAINT %s DO NOTHING", constraint) } return fmt.Sprintf( `INSERT INTO "%s" ("%s") VALUES (%s) %s`, table, strings.Join(columns, `", "`), fmt.Sprintf(":%s", strings.Join(columns, ", :")), clause, ), len(columns) } // BuildSelectStmt returns a SELECT query that creates the FROM part from the given table struct // and the column list from the specified columns struct. func (db *DB) BuildSelectStmt(table interface{}, columns interface{}) string { q := fmt.Sprintf( `SELECT "%s" FROM "%s"`, strings.Join(db.columnMap.Columns(columns), `", "`), TableName(table), ) if scoper, ok := table.(Scoper); ok { where, _ := db.BuildWhere(scoper.Scope()) q += ` WHERE ` + where } return q } // BuildUpdateStmt returns an UPDATE statement for the given struct. func (db *DB) BuildUpdateStmt(update interface{}) (string, int) { columns := db.columnMap.Columns(update) set := make([]string, 0, len(columns)) for _, col := range columns { set = append(set, fmt.Sprintf(`"%s" = :%s`, col, col)) } return fmt.Sprintf( `UPDATE "%s" SET %s WHERE id = :id`, TableName(update), strings.Join(set, ", "), ), len(columns) + 1 // +1 because of WHERE id = :id } // BuildUpsertStmt returns an upsert statement for the given struct. func (db *DB) BuildUpsertStmt(subject interface{}) (stmt string, placeholders int) { insertColumns := db.columnMap.Columns(subject) table := TableName(subject) var updateColumns []string if upserter, ok := subject.(Upserter); ok { updateColumns = db.columnMap.Columns(upserter.Upsert()) } else { updateColumns = insertColumns } var clause, setFormat string switch db.DriverName() { case MySQL: clause = "ON DUPLICATE KEY UPDATE" setFormat = `"%[1]s" = VALUES("%[1]s")` case PostgreSQL: var constraint string if constrainter, ok := subject.(PgsqlOnConflictConstrainter); ok { constraint = constrainter.PgsqlOnConflictConstraint() } else { constraint = "pk_" + table } clause = fmt.Sprintf("ON CONFLICT ON CONSTRAINT %s DO UPDATE SET", constraint) setFormat = `"%[1]s" = EXCLUDED."%[1]s"` } set := make([]string, 0, len(updateColumns)) for _, col := range updateColumns { set = append(set, fmt.Sprintf(setFormat, col)) } return fmt.Sprintf( `INSERT INTO "%s" ("%s") VALUES (%s) %s %s`, table, strings.Join(insertColumns, `", "`), fmt.Sprintf(":%s", strings.Join(insertColumns, ",:")), clause, strings.Join(set, ","), ), len(insertColumns) } // BuildWhere returns a WHERE clause with named placeholder conditions built from the specified struct // combined with the AND operator. 
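// For a struct with the columns host and service (illustrative), this returns
//
//	"host" = :host AND "service" = :service
//
// and the placeholder count 2.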
func (db *DB) BuildWhere(subject interface{}) (string, int) { columns := db.columnMap.Columns(subject) where := make([]string, 0, len(columns)) for _, col := range columns { where = append(where, fmt.Sprintf(`"%s" = :%s`, col, col)) } return strings.Join(where, ` AND `), len(columns) } // OnSuccess is a callback for successful (bulk) DML operations. type OnSuccess[T any] func(ctx context.Context, affectedRows []T) (err error) func OnSuccessIncrement[T any](counter *com.Counter) OnSuccess[T] { return func(_ context.Context, rows []T) error { counter.Add(uint64(len(rows))) return nil } } func OnSuccessSendTo[T any](ch chan<- T) OnSuccess[T] { return func(ctx context.Context, rows []T) error { for _, row := range rows { select { case ch <- row: case <-ctx.Done(): return ctx.Err() } } return nil } } // BulkExec bulk executes queries with a single slice placeholder in the form of `IN (?)`. // Takes in up to the number of arguments specified in count from the arg stream, // derives and expands a query and executes it with this set of arguments until the arg stream has been processed. // The derived queries are executed in a separate goroutine with a weighting of 1 // and can be executed concurrently to the extent allowed by the semaphore passed in sem. // Arguments for which the query ran successfully will be passed to onSuccess. func (db *DB) BulkExec( ctx context.Context, query string, count int, sem *semaphore.Weighted, arg <-chan any, onSuccess ...OnSuccess[any], ) error { var counter com.Counter defer db.Log(ctx, query, &counter).Stop() g, ctx := errgroup.WithContext(ctx) // Use context from group. bulk := com.Bulk(ctx, arg, count, com.NeverSplit[any]) g.Go(func() error { g, ctx := errgroup.WithContext(ctx) for b := range bulk { if err := sem.Acquire(ctx, 1); err != nil { return errors.Wrap(err, "can't acquire semaphore") } g.Go(func(b []interface{}) func() error { return func() error { defer sem.Release(1) return retry.WithBackoff( ctx, func(context.Context) error { stmt, args, err := sqlx.In(query, b) if err != nil { return errors.Wrapf(err, "can't build placeholders for %q", query) } stmt = db.Rebind(stmt) _, err = db.ExecContext(ctx, stmt, args...) if err != nil { return CantPerformQuery(err, query) } counter.Add(uint64(len(b))) for _, onSuccess := range onSuccess { if err := onSuccess(ctx, b); err != nil { return err } } return nil }, retry.Retryable, backoff.DefaultBackoff, db.GetDefaultRetrySettings(), ) } }(b)) } return g.Wait() }) return g.Wait() } // NamedBulkExec bulk executes queries with named placeholders in a VALUES clause most likely // in the format INSERT ... VALUES. Takes in up to the number of entities specified in count // from the arg stream, derives and executes a new query with the VALUES clause expanded to // this set of arguments, until the arg stream has been processed. // The queries are executed in a separate goroutine with a weighting of 1 // and can be executed concurrently to the extent allowed by the semaphore passed in sem. // Entities for which the query ran successfully will be passed to onSuccess. 
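// A sketch of how the streaming helpers below wire this together (compare CreateStreamed):
//
//	stmt, placeholders := db.BuildInsertStmt(first)
//	err := db.NamedBulkExec(ctx, stmt, db.BatchSizeByPlaceholders(placeholders),
//		db.GetSemaphoreForTable(TableName(first)), entities, com.NeverSplit[Entity])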
func (db *DB) NamedBulkExec( ctx context.Context, query string, count int, sem *semaphore.Weighted, arg <-chan Entity, splitPolicyFactory com.BulkChunkSplitPolicyFactory[Entity], onSuccess ...OnSuccess[Entity], ) error { var counter com.Counter defer db.Log(ctx, query, &counter).Stop() g, ctx := errgroup.WithContext(ctx) bulk := com.Bulk(ctx, arg, count, splitPolicyFactory) g.Go(func() error { for { select { case b, ok := <-bulk: if !ok { return nil } if err := sem.Acquire(ctx, 1); err != nil { return errors.Wrap(err, "can't acquire semaphore") } g.Go(func(b []Entity) func() error { return func() error { defer sem.Release(1) return retry.WithBackoff( ctx, func(ctx context.Context) error { _, err := db.NamedExecContext(ctx, query, b) if err != nil { return CantPerformQuery(err, query) } counter.Add(uint64(len(b))) for _, onSuccess := range onSuccess { if err := onSuccess(ctx, b); err != nil { return err } } return nil }, retry.Retryable, backoff.DefaultBackoff, db.GetDefaultRetrySettings(), ) } }(b)) case <-ctx.Done(): return ctx.Err() } } }) return g.Wait() } // NamedBulkExecTx bulk executes queries with named placeholders in separate transactions. // Takes in up to the number of entities specified in count from the arg stream and // executes a new transaction that runs a new query for each entity in this set of arguments, // until the arg stream has been processed. // // The transactions are executed in a separate goroutine with a weighting of 1 // and can be executed concurrently to the extent allowed by the semaphore passed in sem. // // Note that committing the transaction may not honor the context provided, as described further in [DB.ExecTx]. func (db *DB) NamedBulkExecTx( ctx context.Context, query string, count int, sem *semaphore.Weighted, arg <-chan Entity, ) error { var counter com.Counter defer db.Log(ctx, query, &counter).Stop() g, ctx := errgroup.WithContext(ctx) bulk := com.Bulk(ctx, arg, count, com.NeverSplit[Entity]) g.Go(func() error { for { select { case b, ok := <-bulk: if !ok { return nil } if err := sem.Acquire(ctx, 1); err != nil { return errors.Wrap(err, "can't acquire semaphore") } g.Go(func(b []Entity) func() error { return func() error { defer sem.Release(1) return retry.WithBackoff( ctx, func(ctx context.Context) error { tx, err := db.BeginTxx(ctx, nil) if err != nil { return errors.Wrap(err, "can't start transaction") } defer func() { _ = tx.Rollback() }() stmt, err := tx.PrepareNamedContext(ctx, query) if err != nil { return errors.Wrap(err, "can't prepare named statement with context in transaction") } defer func() { _ = stmt.Close() }() for _, arg := range b { if _, err := stmt.ExecContext(ctx, arg); err != nil { return errors.Wrap(err, "can't execute statement in transaction") } } if err := tx.Commit(); err != nil { return errors.Wrap(err, "can't commit transaction") } counter.Add(uint64(len(b))) return nil }, retry.Retryable, backoff.DefaultBackoff, db.GetDefaultRetrySettings(), ) } }(b)) case <-ctx.Done(): return ctx.Err() } } }) return g.Wait() } // BatchSizeByPlaceholders returns how often the specified number of placeholders fits // into Options.MaxPlaceholdersPerStatement, but at least 1. func (db *DB) BatchSizeByPlaceholders(n int) int { s := db.Options.MaxPlaceholdersPerStatement / n if s > 0 { return s } return 1 } // YieldAll executes the query with the supplied scope, // scans each resulting row into an entity returned by the factory function, // and streams them into a returned channel. 
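// A sketch of consuming both channels (factory and scope are placeholders):
//
//	entities, errs := db.YieldAll(ctx, factory, query, scope)
//	// drain entities, then check errs for a streaming error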
func (db *DB) YieldAll(ctx context.Context, factoryFunc EntityFactoryFunc, query string, scope interface{}) (<-chan Entity, <-chan error) { entities := make(chan Entity, 1) g, ctx := errgroup.WithContext(ctx) g.Go(func() error { var counter com.Counter defer db.Log(ctx, query, &counter).Stop() defer close(entities) rows, err := db.NamedQueryContext(ctx, query, scope) if err != nil { return CantPerformQuery(err, query) } defer func() { _ = rows.Close() }() for rows.Next() { e := factoryFunc() if err := rows.StructScan(e); err != nil { return errors.Wrapf(err, "can't store query result into a %T: %s", e, query) } select { case entities <- e: counter.Inc() case <-ctx.Done(): return ctx.Err() } } return nil }) return entities, com.WaitAsync(g) } // CreateStreamed bulk creates the specified entities via NamedBulkExec. // The insert statement is created using BuildInsertStmt with the first entity from the entities stream. // Bulk size is controlled via Options.MaxPlaceholdersPerStatement and // concurrency is controlled via Options.MaxConnectionsPerTable. // Entities for which the query ran successfully will be passed to onSuccess. func (db *DB) CreateStreamed( ctx context.Context, entities <-chan Entity, onSuccess ...OnSuccess[Entity], ) error { first, forward, err := com.CopyFirst(ctx, entities) if err != nil { return errors.Wrap(err, "can't copy first entity") } sem := db.GetSemaphoreForTable(TableName(first)) stmt, placeholders := db.BuildInsertStmt(first) return db.NamedBulkExec( ctx, stmt, db.BatchSizeByPlaceholders(placeholders), sem, forward, com.NeverSplit[Entity], onSuccess..., ) } // CreateIgnoreStreamed bulk creates the specified entities via NamedBulkExec. // The insert statement is created using BuildInsertIgnoreStmt with the first entity from the entities stream. // Bulk size is controlled via Options.MaxPlaceholdersPerStatement and // concurrency is controlled via Options.MaxConnectionsPerTable. // Entities for which the query ran successfully will be passed to onSuccess. func (db *DB) CreateIgnoreStreamed( ctx context.Context, entities <-chan Entity, onSuccess ...OnSuccess[Entity], ) error { first, forward, err := com.CopyFirst(ctx, entities) if err != nil { return errors.Wrap(err, "can't copy first entity") } sem := db.GetSemaphoreForTable(TableName(first)) stmt, placeholders := db.BuildInsertIgnoreStmt(first) return db.NamedBulkExec( ctx, stmt, db.BatchSizeByPlaceholders(placeholders), sem, forward, SplitOnDupId[Entity], onSuccess..., ) } // UpsertStreamed bulk upserts the specified entities via NamedBulkExec. // The upsert statement is created using BuildUpsertStmt with the first entity from the entities stream. // Bulk size is controlled via Options.MaxPlaceholdersPerStatement and // concurrency is controlled via Options.MaxConnectionsPerTable. // Entities for which the query ran successfully will be passed to onSuccess. func (db *DB) UpsertStreamed( ctx context.Context, entities <-chan Entity, onSuccess ...OnSuccess[Entity], ) error { first, forward, err := com.CopyFirst(ctx, entities) if err != nil { return errors.Wrap(err, "can't copy first entity") } sem := db.GetSemaphoreForTable(TableName(first)) stmt, placeholders := db.BuildUpsertStmt(first) return db.NamedBulkExec( ctx, stmt, db.BatchSizeByPlaceholders(placeholders), sem, forward, SplitOnDupId[Entity], onSuccess..., ) } // UpdateStreamed bulk updates the specified entities via NamedBulkExecTx. // The update statement is created using BuildUpdateStmt with the first entity from the entities stream. 
// Bulk size is controlled via Options.MaxRowsPerTransaction and // concurrency is controlled via Options.MaxConnectionsPerTable. func (db *DB) UpdateStreamed(ctx context.Context, entities <-chan Entity) error { first, forward, err := com.CopyFirst(ctx, entities) if err != nil { return errors.Wrap(err, "can't copy first entity") } sem := db.GetSemaphoreForTable(TableName(first)) stmt, _ := db.BuildUpdateStmt(first) return db.NamedBulkExecTx(ctx, stmt, db.Options.MaxRowsPerTransaction, sem, forward) } // DeleteStreamed bulk deletes the specified ids via BulkExec. // The delete statement is created using BuildDeleteStmt with the passed entityType. // Bulk size is controlled via Options.MaxPlaceholdersPerStatement and // concurrency is controlled via Options.MaxConnectionsPerTable. // IDs for which the query ran successfully will be passed to onSuccess. func (db *DB) DeleteStreamed( ctx context.Context, entityType Entity, ids <-chan interface{}, onSuccess ...OnSuccess[any], ) error { sem := db.GetSemaphoreForTable(TableName(entityType)) return db.BulkExec( ctx, db.BuildDeleteStmt(entityType), db.Options.MaxPlaceholdersPerStatement, sem, ids, onSuccess..., ) } // Delete creates a channel from the specified ids and // bulk deletes them by passing the channel along with the entityType to DeleteStreamed. // IDs for which the query ran successfully will be passed to onSuccess. func (db *DB) Delete( ctx context.Context, entityType Entity, ids []interface{}, onSuccess ...OnSuccess[any], ) error { idsCh := make(chan interface{}, len(ids)) for _, id := range ids { idsCh <- id } close(idsCh) return db.DeleteStreamed(ctx, entityType, idsCh, onSuccess...) } // ExecTx executes the provided function within a database transaction. // // Starts a new transaction, executes the provided function, and commits the transaction // if the function succeeds. If the function returns an error, the transaction is rolled back. // // Returns an error if starting the transaction, executing the function, or committing the transaction fails. // // Note that committing the transaction may not honor the context provided. For some database drivers, once a COMMIT // query is started, it will block until the database responds. Therefore, for time-critical scenarios, it is // recommended to add a select wrapper against the context. func (db *DB) ExecTx(ctx context.Context, fn func(context.Context, *sqlx.Tx) error) error { tx, err := db.BeginTxx(ctx, nil) if err != nil { return errors.Wrap(err, "can't start transaction") } // We don't expect meaningful errors from rolling back the tx other than the sql.ErrTxDone, so just ignore it. defer func() { _ = tx.Rollback() }() if err := fn(ctx, tx); err != nil { return errors.WithStack(err) } if err := tx.Commit(); err != nil { return errors.Wrap(err, "can't commit transaction") } return nil } func (db *DB) GetSemaphoreForTable(table string) *semaphore.Weighted { db.tableSemaphoresMu.Lock() defer db.tableSemaphoresMu.Unlock() if sem, ok := db.tableSemaphores[table]; ok { return sem } else { sem = semaphore.NewWeighted(int64(db.Options.MaxConnectionsPerTable)) db.tableSemaphores[table] = sem return sem } } // HasTable checks whether a table is present in the database. // // The first return value indicates whether a table of the given name exists. The second return value contains any // errors that occurred during the check. If the error is not nil, the first argument is always false. 
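//
// A hedged usage sketch (the table name is illustrative only):
//
//	ok, err := db.HasTable(ctx, "icingadb_schema")
//	if err != nil {
//		return err
//	}
//	if !ok {
//		return errors.New("table icingadb_schema is missing")
//	}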
func (db *DB) HasTable(ctx context.Context, table string) (bool, error) { var tableSchemaFunc string switch db.DriverName() { case MySQL: tableSchemaFunc = "DATABASE()" case PostgreSQL: tableSchemaFunc = "CURRENT_SCHEMA()" default: return false, errors.Errorf("unsupported database driver %q", db.DriverName()) } var hasTable bool err := retry.WithBackoff( ctx, func(ctx context.Context) error { query := db.Rebind("SELECT 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA=" + tableSchemaFunc + " AND TABLE_NAME=?") rows, err := db.QueryContext(ctx, query, table) if err != nil { return CantPerformQuery(err, query) } defer func() { _ = rows.Close() }() hasTable = rows.Next() if err := rows.Close(); err != nil { return err } return rows.Err() }, retry.Retryable, backoff.DefaultBackoff, db.GetDefaultRetrySettings()) if err != nil { return false, errors.Wrapf(err, "can't verify existence of database table %q", table) } return hasTable, nil } func (db *DB) GetDefaultRetrySettings() retry.Settings { return retry.Settings{ Timeout: retry.DefaultTimeout, OnRetryableError: func(elapsed time.Duration, attempt uint64, err, lastErr error) { db.logger.Warnw("Can't execute query. Retrying", zap.Error(err), zap.Duration("after", elapsed), zap.Uint64("attempt", attempt)) }, OnSuccess: func(elapsed time.Duration, attempt uint64, lastErr error) { if attempt > 1 { db.logger.Infow("Query retried successfully after error", zap.Duration("after", elapsed), zap.Uint64("attempts", attempt), zap.NamedError("recovered_error", lastErr)) } }, } } func (db *DB) Log(ctx context.Context, query string, counter *com.Counter) periodic.Stopper { return periodic.Start(ctx, db.logger.Interval(), func(tick periodic.Tick) { if count := counter.Reset(); count > 0 { db.logger.Debugf("Executed %q with %d rows", query, count) } }, periodic.OnStop(func(tick periodic.Tick) { db.logger.Debugf("Finished executing %q with %d rows in %s", query, counter.Total(), tick.Elapsed) })) } var ( // Assert TxOrDB interface compliance of the DB and sqlx.Tx types. _ TxOrDB = (*DB)(nil) _ TxOrDB = (*sqlx.Tx)(nil) ) dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/database/contracts.go0000644000000000000000000000374315024302466027311 0ustar rootrootpackage database import ( "context" "github.com/jmoiron/sqlx" ) // Entity is implemented by each type that works with the database package. type Entity interface { Fingerprinter IDer } // Fingerprinter is implemented by every entity that uniquely identifies itself. type Fingerprinter interface { // Fingerprint returns the value that uniquely identifies the entity. Fingerprint() Fingerprinter } // ID is a unique identifier of an entity. type ID interface { // String returns the string representation form of the ID. // The String method is used to use the ID in functions // where it needs to be compared or hashed. String() string } // IDer is implemented by every entity that uniquely identifies itself. type IDer interface { ID() ID // ID returns the ID. SetID(ID) // SetID sets the ID. } // EntityFactoryFunc knows how to create an Entity. type EntityFactoryFunc func() Entity // Upserter implements the Upsert method, // which returns a part of the object for ON DUPLICATE KEY UPDATE. type Upserter interface { Upsert() any // Upsert partitions the object. } // TableNamer implements the TableName method, // which returns the table of the object. type TableNamer interface { TableName() string // TableName tells the table. 
} // Scoper implements the Scope method, // which returns a struct specifying the WHERE conditions that // entities must satisfy in order to be SELECTed. type Scoper interface { Scope() any } // PgsqlOnConflictConstrainter implements the PgsqlOnConflictConstraint method, // which returns the primary or unique key constraint name of the PostgreSQL table. type PgsqlOnConflictConstrainter interface { // PgsqlOnConflictConstraint returns the primary or unique key constraint name of the PostgreSQL table. PgsqlOnConflictConstraint() string } // TxOrDB is just a helper interface that can represent a *[sqlx.Tx] or *[DB] instance. type TxOrDB interface { sqlx.ExtContext PrepareNamedContext(ctx context.Context, query string) (*sqlx.NamedStmt, error) } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/utils/0000755000000000000000000000000015024302466024347 5ustar rootrootdependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/utils/utils_test.go0000644000000000000000000001632515024302466027104 0ustar rootrootpackage utils import ( "context" "encoding/hex" "fmt" "github.com/go-sql-driver/mysql" "github.com/lib/pq" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "testing" "time" ) func TestBatchSliceOfStrings(t *testing.T) { subtests := []struct { name string keys []string count int output [][]string }{ {"nil", nil, 1, nil}, {"empty", make([]string, 0, 1), 1, nil}, {"a", []string{"a"}, 1, [][]string{{"a"}}}, {"a2", []string{"a"}, 2, [][]string{{"a"}}}, {"a_b", []string{"a", "b"}, 1, [][]string{{"a"}, {"b"}}}, {"ab", []string{"a", "b"}, 2, [][]string{{"a", "b"}}}, {"ab3", []string{"a", "b"}, 3, [][]string{{"a", "b"}}}, {"a_b_c", []string{"a", "b", "c"}, 1, [][]string{{"a"}, {"b"}, {"c"}}}, {"ab_c", []string{"a", "b", "c"}, 2, [][]string{{"a", "b"}, {"c"}}}, {"abc", []string{"a", "b", "c"}, 3, [][]string{{"a", "b", "c"}}}, {"abc4", []string{"a", "b", "c"}, 4, [][]string{{"a", "b", "c"}}}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { batches := BatchSliceOfStrings(context.Background(), st.keys, st.count) require.NotNil(t, batches) for _, expected := range st.output { select { case actual, ok := <-batches: require.True(t, ok, "receiving should return a value") require.Equal(t, expected, actual) case <-time.After(10 * time.Millisecond): require.Fail(t, "receiving should not block") } } select { case _, ok := <-batches: require.False(t, ok, "receiving from channel should not return anything") case <-time.After(10 * time.Millisecond): require.Fail(t, "receiving should not block") } }) } for _, i := range []int{0, -1, -2, -30} { t.Run(fmt.Sprint(i), func(t *testing.T) { require.Panics(t, func() { BatchSliceOfStrings(context.Background(), nil, i) }) }) } } func TestChecksum(t *testing.T) { subtests := []struct { name string input any output string }{ {"empty_string", "", "da39a3ee5e6b4b0d3255bfef95601890afd80709"}, {"empty_bytes", []byte(nil), "da39a3ee5e6b4b0d3255bfef95601890afd80709"}, {"space_string", " ", "b858cb282617fb0956d960215c8e84d1ccf909c6"}, {"space_bytes", []byte(" "), "b858cb282617fb0956d960215c8e84d1ccf909c6"}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { require.Equal(t, st.output, hex.EncodeToString(Checksum(st.input))) }) } unsupported := []struct { name string input any }{ {"nil", nil}, {"bool", false}, {"int", 0}, {"float", 0.0}, {"struct", struct{}{}}, {"slice", []string{}}, {"map", map[string]string{}}, } for _, st := range unsupported { t.Run(st.name, func(t *testing.T) { require.Panics(t, 
func() { Checksum(st.input) }) }) } } func TestIsDeadlock(t *testing.T) { msg := "Unsuccessful attempt of confusing the tested code." code := [5]byte{0, 23, 42, 77, 255} subtests := []struct { name string input error output bool }{ {"nil", nil, false}, {"deadline", context.DeadlineExceeded, false}, {"mysql1204", &mysql.MySQLError{Number: 1204}, false}, {"mysql1205", &mysql.MySQLError{Number: 1205}, true}, {"mysql1205_with_crap", &mysql.MySQLError{Number: 1205, SQLState: code, Message: msg}, true}, {"mysql1206", &mysql.MySQLError{Number: 1206}, false}, {"mysql1212", &mysql.MySQLError{Number: 1212}, false}, {"mysql1213", &mysql.MySQLError{Number: 1213}, true}, {"mysql1213_with_crap", &mysql.MySQLError{Number: 1213, SQLState: code, Message: msg}, true}, {"mysql1214", &mysql.MySQLError{Number: 1214}, false}, {"postgres40000", &pq.Error{Code: "40000"}, false}, {"postgres40001", &pq.Error{Code: "40001"}, true}, {"postgres40001_with_crap", &pq.Error{Code: "40001", Message: msg}, true}, {"postgres40002", &pq.Error{Code: "40002"}, false}, {"postgres40P01", &pq.Error{Code: "40P01"}, true}, {"postgres40P01_with_crap", &pq.Error{Code: "40P01", Message: msg}, true}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { require.Equal(t, st.output, IsDeadlock(st.input)) }) } } func TestEllipsize(t *testing.T) { subtests := []struct { name string s string limit int output string }{ {"negative", "", -1, "..."}, {"empty", "", 0, ""}, {"shorter", " ", 2, " "}, {"equal", " ", 1, " "}, {"longer", " ", 0, "..."}, {"unicode", "äöü߀", 4, "ä..."}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { require.Equal(t, st.output, Ellipsize(st.s, st.limit)) }) } } func TestIsUnixAddr(t *testing.T) { subtests := []struct { name string input string output bool }{ {"empty", "", false}, {"slash", "/", true}, {"unix", "/tmp/sock", true}, {"ipv4", "192.0.2.1", false}, {"ipv6", "2001:db8::", false}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { require.Equal(t, st.output, IsUnixAddr(st.input)) }) } } func TestJoinHostPort(t *testing.T) { subtests := []struct { name string host string port int output string }{ {"empty", "", 0, ":0"}, {"ipv4", "192.0.2.1", 80, "192.0.2.1:80"}, {"ipv6", "2001:db8::", 443, "[2001:db8::]:443"}, {"unix", "/tmp/sock", 5665, "/tmp/sock"}, } for _, st := range subtests { t.Run(st.name, func(t *testing.T) { require.Equal(t, st.output, JoinHostPort(st.host, st.port)) }) } } func TestChanFromSlice(t *testing.T) { t.Run("Nil", func(t *testing.T) { ch := ChanFromSlice[int](nil) require.NotNil(t, ch) requireClosedEmpty(t, ch) }) t.Run("Empty", func(t *testing.T) { ch := ChanFromSlice([]int{}) require.NotNil(t, ch) requireClosedEmpty(t, ch) }) t.Run("NonEmpty", func(t *testing.T) { ch := ChanFromSlice([]int{42, 23, 1337}) require.NotNil(t, ch) requireReceive(t, ch, 42) requireReceive(t, ch, 23) requireReceive(t, ch, 1337) requireClosedEmpty(t, ch) }) } // requireReceive is a helper function to check if a value can immediately be received from a channel. func requireReceive(t *testing.T, ch <-chan int, expected int) { t.Helper() select { case v, ok := <-ch: require.True(t, ok, "receiving should return a value") require.Equal(t, expected, v) default: require.Fail(t, "receiving should not block") } } // requireReceive is a helper function to check if the channel is closed and empty. 
func requireClosedEmpty(t *testing.T, ch <-chan int) { t.Helper() select { case _, ok := <-ch: require.False(t, ok, "receiving from channel should not return anything") default: require.Fail(t, "receiving should not block") } } func TestIterateOrderedMap(t *testing.T) { tests := []struct { name string in map[int]string outKeys []int }{ {"empty", map[int]string{}, nil}, {"single", map[int]string{1: "foo"}, []int{1}}, {"few-numbers", map[int]string{1: "a", 2: "b", 3: "c"}, []int{1, 2, 3}}, { "1k-numbers", func() map[int]string { m := make(map[int]string) for i := 0; i < 1000; i++ { m[i] = "foo" } return m }(), func() []int { keys := make([]int, 1000) for i := 0; i < 1000; i++ { keys[i] = i } return keys }(), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var outKeys []int for k, v := range IterateOrderedMap(tt.in) { assert.Equal(t, tt.in[k], v) outKeys = append(outKeys, k) } assert.Equal(t, tt.outKeys, outKeys) }) } } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/utils/utils.go0000644000000000000000000001070415024302466026040 0ustar rootrootpackage utils import ( "cmp" "context" "crypto/sha1" // #nosec G505 -- Blocklisted import crypto/sha1 "fmt" "github.com/go-sql-driver/mysql" "github.com/lib/pq" "github.com/pkg/errors" "golang.org/x/exp/utf8string" "iter" "net" "os" "path/filepath" "slices" "strings" "time" ) // Timed calls the given callback with the time that has elapsed since the start. // // Timed should be installed by defer: // // func TimedExample(logger *zap.SugaredLogger) { // defer utils.Timed(time.Now(), func(elapsed time.Duration) { // logger.Debugf("Executed job in %s", elapsed) // }) // job() // } func Timed(start time.Time, callback func(elapsed time.Duration)) { callback(time.Since(start)) } // BatchSliceOfStrings groups the given keys into chunks of size count and streams them into a returned channel. // Panics if count is less than or equal to zero. func BatchSliceOfStrings(ctx context.Context, keys []string, count int) <-chan []string { if count <= 0 { panic("chunk size must be greater than zero") } batches := make(chan []string) go func() { defer close(batches) for i := 0; i < len(keys); i += count { end := i + count if end > len(keys) { end = len(keys) } select { case batches <- keys[i:end]: case <-ctx.Done(): return } } }() return batches } // IsContextCanceled returns whether the given error is context.Canceled. func IsContextCanceled(err error) bool { return errors.Is(err, context.Canceled) } // Checksum returns the SHA-1 checksum of the data. func Checksum(data interface{}) []byte { var chksm [sha1.Size]byte switch data := data.(type) { case string: // #nosec G401 -- Use of weak cryptographic primitive - we don't intend to change this anytime soon. chksm = sha1.Sum([]byte(data)) case []byte: // #nosec G401 -- Use of weak cryptographic primitive - we don't intend to change this anytime soon. chksm = sha1.Sum(data) default: panic(fmt.Sprintf("Unable to create checksum for type %T", data)) } return chksm[:] } // IsDeadlock returns whether the given error signals serialization failure. func IsDeadlock(err error) bool { var e *mysql.MySQLError if errors.As(err, &e) { switch e.Number { case 1205, 1213: return true default: return false } } var pe *pq.Error if errors.As(err, &pe) { switch pe.Code { case "40001", "40P01": return true } } return false } var ellipsis = utf8string.NewString("...") // Ellipsize shortens s to <=limit runes and indicates shortening by "...". 
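//
// For example, mirroring the cases in utils_test.go (the limit counts runes,
// not bytes):
//
//	Ellipsize("äöü߀", 4) // "ä..."
//	Ellipsize("äöü߀", 5) // "äöü߀", unchanged because it already fits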
func Ellipsize(s string, limit int) string { utf8 := utf8string.NewString(s) switch { case utf8.RuneCount() <= limit: return s case utf8.RuneCount() <= ellipsis.RuneCount(): return ellipsis.String() default: return utf8.Slice(0, limit-ellipsis.RuneCount()) + ellipsis.String() } } // AppName returns the name of the executable that started this program (process). func AppName() string { exe, err := os.Executable() if err != nil { exe = os.Args[0] } return filepath.Base(exe) } // IsUnixAddr indicates whether the given host string represents a Unix socket address. // // A host string that begins with a forward slash ('/') is considered Unix socket address. func IsUnixAddr(host string) bool { return strings.HasPrefix(host, "/") } // JoinHostPort is like its equivalent in net., but handles UNIX sockets as well. func JoinHostPort(host string, port int) string { if IsUnixAddr(host) { return host } return net.JoinHostPort(host, fmt.Sprint(port)) } // ChanFromSlice takes a slice of values and returns a channel from which these values can be received. // This channel is closed after the last value was sent. func ChanFromSlice[T any](values []T) <-chan T { ch := make(chan T, len(values)) for _, value := range values { ch <- value } close(ch) return ch } // PrintErrorThenExit prints the given error to [os.Stderr] and exits with the specified error code. func PrintErrorThenExit(err error, exitCode int) { fmt.Fprintln(os.Stderr, err) os.Exit(exitCode) } // IterateOrderedMap implements iter.Seq2 to iterate over a map in the key's order. // // This function returns a func yielding key-value-pairs from a given map in the order of their keys, if their type // is cmp.Ordered. func IterateOrderedMap[K cmp.Ordered, V any](m map[K]V) iter.Seq2[K, V] { keys := make([]K, 0, len(m)) for key := range m { keys = append(keys, key) } slices.Sort(keys) return func(yield func(K, V) bool) { for _, key := range keys { if !yield(key, m[key]) { return } } } } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/.golangci.yml0000644000000000000000000000160615024302466025576 0ustar rootrootversion: "2" linters: default: standard enable: - bidichk # dangerous Unicode chars - bodyclose # HTTP response body is closed - exptostd # functions from golang.org/x/exp/ that can be replaced by std functions - forcetypeassert # forced type assertions - gosec # security problems - loggercheck # key value pairs for common logger libraries (including zap) - misspell # commonly misspelled English words - nosprintfhostport # misuse of Sprintf to construct a host with port in a URL - rowserrcheck # Rows.Err of rows is checked - sqlclosecheck # sql.Rows, sql.Stmt, sqlx.NamedStmt, pgx.Query are closed settings: staticcheck: checks: - all - '-ST1000' # ignore missing package comments - '-ST1003' # ignore capitalization in camel case names - '-QF1001' # don't suggest De Morgan's law for boolean expressions dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/logging/0000755000000000000000000000000015024302466024635 5ustar rootrootdependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/logging/config.go0000644000000000000000000000577315024302466026445 0ustar rootrootpackage logging import ( "fmt" "github.com/creasty/defaults" "github.com/pkg/errors" "go.uber.org/zap/zapcore" "os" "strings" "time" ) // Options define child loggers with their desired log level. type Options map[string]zapcore.Level // UnmarshalText implements encoding.TextUnmarshaler to allow Options to be parsed by env. 
// // This custom TextUnmarshaler is necessary as - for the moment - env does not support map[T]encoding.TextUnmarshaler. // After got merged and a new env release was drafted, this method can be // removed. func (o *Options) UnmarshalText(text []byte) error { optionsMap := make(map[string]zapcore.Level) for _, entry := range strings.Split(string(text), ",") { key, valueStr, found := strings.Cut(entry, ":") if !found { return fmt.Errorf("entry %q cannot be unmarshalled as an Option entry", entry) } valueLvl, err := zapcore.ParseLevel(valueStr) if err != nil { return fmt.Errorf("entry %q cannot be unmarshalled as level, %w", entry, err) } optionsMap[key] = valueLvl } *o = optionsMap return nil } // UnmarshalYAML implements yaml.InterfaceUnmarshaler to allow Options to be parsed go-yaml. func (o *Options) UnmarshalYAML(unmarshal func(any) error) error { optionsMap := make(map[string]zapcore.Level) if err := unmarshal(&optionsMap); err != nil { return err } *o = optionsMap return nil } // Config defines Logger configuration. type Config struct { // zapcore.Level at 0 is for info level. Level zapcore.Level `yaml:"level" env:"LEVEL" default:"0"` Output string `yaml:"output" env:"OUTPUT"` // Interval for periodic logging. Interval time.Duration `yaml:"interval" env:"INTERVAL" default:"20s"` Options Options `yaml:"options" env:"OPTIONS"` } // SetDefaults implements defaults.Setter to configure the log output if it is not set: // systemd-journald is used when Icinga DB is running under systemd, otherwise stderr. func (c *Config) SetDefaults() { if defaults.CanUpdate(c.Output) { if _, ok := os.LookupEnv("NOTIFY_SOCKET"); ok { // When started by systemd, NOTIFY_SOCKET is set by systemd for Type=notify supervised services, // which is the default setting for the Icinga DB service. // This assumes that Icinga DB is running under systemd, so set output to systemd-journald. c.Output = JOURNAL } else { // Otherwise set it to console, i.e. write log messages to stderr. c.Output = CONSOLE } } } // Validate checks constraints in the configuration and returns an error if they are violated. func (c *Config) Validate() error { if c.Interval <= 0 { return errors.New("periodic logging interval must be positive") } return AssertOutput(c.Output) } // AssertOutput returns an error if output is not a valid logger output. func AssertOutput(o string) error { if o == CONSOLE || o == JOURNAL { return nil } return invalidOutput(o) } func invalidOutput(o string) error { return fmt.Errorf("%s is not a valid logger output. 
Must be either %q or %q", o, CONSOLE, JOURNAL) } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/logging/journald_core_test.go0000644000000000000000000001147115024302466031055 0ustar rootrootpackage logging import ( "fmt" "github.com/pkg/errors" "github.com/stretchr/testify/require" "go.uber.org/zap" "go.uber.org/zap/zapcore" "regexp" "testing" ) func Test_journaldFieldEncode(t *testing.T) { tests := []struct { name string input string output string }{ {"empty", "", "EMPTY_KEY"}, {"lowercase", "foo", "FOO"}, {"uppercase", "FOO", "FOO"}, {"dash", "foo-bar", "FOO_BAR"}, {"non ascii", "snow_☃", "SNOW__"}, {"lowercase non ascii alpha", "föö", "F__"}, {"uppercase non ascii alpha", "FÖÖ", "F__"}, {"leading number", "23", "ESC_23"}, {"leading underscore", "_foo", "ESC__FOO"}, {"leading invalid", " foo", "ESC__FOO"}, {"max length", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA1234", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA1234"}, {"too long", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA12345", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA1234"}, {"too long leading number", "1234567890123456789012345678901234567890123456789012345678901234", "ESC_123456789012345678901234567890123456789012345678901234567890"}, {"concrete example", "icinga-notifications" + "_" + "error", "ICINGA_NOTIFICATIONS_ERROR"}, {"example syslog_identifier", "SYSLOG_IDENTIFIER", "SYSLOG_IDENTIFIER"}, } check := regexp.MustCompile(`^[A-Z][A-Z0-9_]{0,63}$`) for _, test := range tests { t.Run(test.name, func(t *testing.T) { out := encodeJournaldFieldKey(test.input) require.Equal(t, test.output, out) require.True(t, check.MatchString(out), "check regular expression") }) } } // testingStackError is an error mimicking the stack behavior from github.com/pkg/errors in a deterministic way. 
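//
// Formatting it with "%+v" produces the deterministic pseudo stack trace used by
// the visibleFieldsMsg tests below, while "%v" and "%s" print only the message:
//
//	fmt.Sprintf("%v", testingStackError("oops"))  // "oops"
//	fmt.Sprintf("%+v", testingStackError("oops")) // "oops: look, I am a stack trace"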
type testingStackError string func (err testingStackError) Error() string { return string(err) } func (err testingStackError) Format(s fmt.State, verb rune) { if verb == 'v' && s.Flag('+') { _, _ = fmt.Fprintf(s, "%s: look, I am a stack trace", string(err)) } else { _, _ = fmt.Fprintf(s, "%s", string(err)) } } func Test_visibleFieldsMsg(t *testing.T) { tests := []struct { name string visibleFieldKeys map[string]struct{} fields []zapcore.Field output string }{ { name: "empty-all-nil", visibleFieldKeys: nil, fields: nil, output: "", }, { name: "empty-all", visibleFieldKeys: map[string]struct{}{}, fields: nil, output: "", }, { name: "empty-visibleFiledKeys", visibleFieldKeys: map[string]struct{}{}, fields: []zapcore.Field{zap.String("foo", "bar")}, output: "", }, { name: "no-field-match", visibleFieldKeys: map[string]struct{}{"bar": {}}, fields: []zapcore.Field{zap.String("foo", "bar")}, output: "", }, { name: "expected-string", visibleFieldKeys: map[string]struct{}{"foo": {}}, fields: []zapcore.Field{zap.String("foo", "bar")}, output: "\t" + `foo="bar"`, }, { name: "expected-multiple-strings-with-excluded", visibleFieldKeys: map[string]struct{}{"foo": {}, "bar": {}}, fields: []zapcore.Field{ zap.String("foo", "bar"), zap.String("bar", "baz"), zap.String("baz", "qux"), // not in allow list }, output: "\t" + `bar="baz", foo="bar"`, }, { name: "expected-error-simple", visibleFieldKeys: map[string]struct{}{"error": {}}, fields: []zapcore.Field{zap.Error(fmt.Errorf("oops"))}, output: "\t" + `error="oops"`, }, { name: "expected-error-without-stack", visibleFieldKeys: map[string]struct{}{"error": {}}, fields: []zapcore.Field{zap.Error(errors.WithStack(fmt.Errorf("oops")))}, output: "\t" + `error="oops"`, }, { name: "expected-error-with-stack", visibleFieldKeys: map[string]struct{}{"error": {}, "errorVerbose": {}}, fields: []zapcore.Field{zap.Error(testingStackError("oops"))}, output: "\t" + `error="oops", errorVerbose="oops: look, I am a stack trace"`, }, { name: "expected-multiple-basic-types", visibleFieldKeys: map[string]struct{}{ "bool": {}, "byte-string": {}, "complex": {}, "float": {}, "int": {}, }, fields: []zapcore.Field{ zap.Bool("bool", true), zap.ByteString("byte-string", []byte{0xC0, 0xFF, 0xEE}), zap.Complex64("complex", -1i), zap.Float64("float", 1.0/3.0), zap.Int("int", 42), }, output: "\t" + `bool="true", byte-string="\xc0\xff\xee", complex="(0-1i)", float="0.3333333333333333", int="42"`, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { out := visibleFieldsMsg(test.visibleFieldKeys, test.fields) require.Equal(t, test.output, out) }) } } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/logging/logging.go0000644000000000000000000000730715024302466026621 0ustar rootrootpackage logging import ( "go.uber.org/zap" "go.uber.org/zap/zapcore" "os" "sync" "time" ) const ( CONSOLE = "console" JOURNAL = "systemd-journald" ) // defaultEncConfig defines the default zapcore.EncoderConfig for the logging package. var defaultEncConfig = zapcore.EncoderConfig{ TimeKey: "ts", LevelKey: "level", NameKey: "logger", CallerKey: "caller", MessageKey: "msg", StacktraceKey: "stacktrace", LineEnding: zapcore.DefaultLineEnding, EncodeLevel: zapcore.CapitalLevelEncoder, EncodeTime: zapcore.ISO8601TimeEncoder, EncodeDuration: zapcore.StringDurationEncoder, EncodeCaller: zapcore.ShortCallerEncoder, } // Logging implements access to a default logger and named child loggers. 
// Log levels can be configured per named child via Options which, if not configured, // fall back on a default log level. // Logs either to the console or to systemd-journald. type Logging struct { logger *Logger output string verbosity zap.AtomicLevel interval time.Duration // coreFactory creates zapcore.Core based on the log level and the log output. coreFactory func(zap.AtomicLevel) zapcore.Core mu sync.Mutex loggers map[string]*Logger options Options } // NewLogging takes the name and log level for the default logger, // output where log messages are written to, // options having log levels for named child loggers // and returns a new Logging. func NewLogging(name string, level zapcore.Level, output string, options Options, interval time.Duration) (*Logging, error) { verbosity := zap.NewAtomicLevelAt(level) var coreFactory func(zap.AtomicLevel) zapcore.Core switch output { case CONSOLE: enc := zapcore.NewConsoleEncoder(defaultEncConfig) ws := zapcore.Lock(os.Stderr) coreFactory = func(verbosity zap.AtomicLevel) zapcore.Core { return zapcore.NewCore(enc, ws, verbosity) } case JOURNAL: coreFactory = func(verbosity zap.AtomicLevel) zapcore.Core { return NewJournaldCore(name, verbosity) } default: return nil, invalidOutput(output) } logger := NewLogger(zap.New(coreFactory(verbosity)).Named(name).Sugar(), interval) return &Logging{ logger: logger, output: output, verbosity: verbosity, interval: interval, coreFactory: coreFactory, loggers: make(map[string]*Logger), options: options, }, nil } // NewLoggingFromConfig returns a new Logging from Config. func NewLoggingFromConfig(name string, c Config) (*Logging, error) { return NewLogging(name, c.Level, c.Output, c.Options, c.Interval) } // GetChildLogger returns a named child logger. // Log levels for named child loggers are obtained from the logging options and, if not found, // set to the default log level. func (l *Logging) GetChildLogger(name string) *Logger { l.mu.Lock() defer l.mu.Unlock() if logger, ok := l.loggers[name]; ok { return logger } var verbosity zap.AtomicLevel if level, found := l.options[name]; found { verbosity = zap.NewAtomicLevelAt(level) } else { verbosity = l.verbosity } logger := NewLogger(zap.New(l.coreFactory(verbosity)).Named(name).Sugar(), l.interval) l.loggers[name] = logger return logger } // GetLogger returns the default logger. func (l *Logging) GetLogger() *Logger { return l.logger } // ForceLog results in every message being logged. // // This [zap.Option] is the opposite of [zap.IncreaseLevel], it just decreases the log level to debug. Since zap's // architecture does not allow this with the same [zapcore.Core], it replaces the core with a freshly created one from // the Logging's core factory. func (l *Logging) ForceLog() zap.Option { return zap.WrapCore(func(_ zapcore.Core) zapcore.Core { return l.coreFactory(zap.NewAtomicLevelAt(zapcore.DebugLevel)) }) } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/logging/error.go0000644000000000000000000000212315024302466026313 0ustar rootrootpackage logging import ( "github.com/pkg/errors" "go.uber.org/zap" ) // stackTracer is an interface used to identify errors that include a stack trace. // This interface specifically targets errors created using the github.com/pkg/errors library, // which can add stack traces to errors with functions like errors.Wrap(). type stackTracer interface { StackTrace() errors.StackTrace } // errNoStackTrace is a wrapper for errors that implements the error interface without exposing a stack trace. 
type errNoStackTrace struct { e error } // Error returns the error message of the wrapped error. func (e errNoStackTrace) Error() string { return e.e.Error() } // Error returns a zap.Field for logging the provided error. // This function checks if the error includes a stack trace from the pkg/errors library. // If a stack trace is present, it is suppressed in the log output because // logging a stack trace is not necessary. Otherwise, the error is logged normally. func Error(e error) zap.Field { if _, ok := e.(stackTracer); ok { return zap.Error(errNoStackTrace{e}) } return zap.Error(e) } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/logging/config_test.go0000644000000000000000000000564715024302466027504 0ustar rootrootpackage logging import ( "fmt" "github.com/creasty/defaults" "github.com/icinga/icinga-go-library/config" "github.com/icinga/icinga-go-library/testutils" "github.com/stretchr/testify/require" "go.uber.org/zap/zapcore" "os" "testing" "time" ) func TestConfig(t *testing.T) { var defaultConfig Config require.NoError(t, defaults.Set(&defaultConfig), "setting default config") configTests := []testutils.TestCase[Config, testutils.ConfigTestData]{ { Name: "Defaults", Data: testutils.ConfigTestData{ // An empty YAML file causes an error, // so specify a valid key without a value to trigger fallback to the default. Yaml: `level:`, }, Expected: defaultConfig, }, { Name: "periodic logging interval must be positive", Data: testutils.ConfigTestData{ Yaml: `interval: 0s`, Env: map[string]string{"INTERVAL": "0s"}, }, Error: testutils.ErrorContains("periodic logging interval must be positive"), }, { Name: "invalid logger output", Data: testutils.ConfigTestData{ Yaml: `output: invalid`, Env: map[string]string{"OUTPUT": "invalid"}, }, Error: testutils.ErrorContains("invalid is not a valid logger output"), }, { Name: "Customized", Data: testutils.ConfigTestData{ Yaml: fmt.Sprintf( ` level: debug output: %s interval: 3m14s`, JOURNAL, ), Env: map[string]string{ "LEVEL": zapcore.DebugLevel.String(), "OUTPUT": JOURNAL, "INTERVAL": "3m14s", }, }, Expected: Config{ Level: zapcore.DebugLevel, Output: JOURNAL, Interval: 3*time.Minute + 14*time.Second, }, }, { Name: "Options", Data: testutils.ConfigTestData{ Yaml: ` options: foo: debug bar: info buz: panic`, Env: map[string]string{"OPTIONS": "foo:debug,bar:info,buz:panic"}, }, Expected: Config{ Output: defaultConfig.Output, Interval: defaultConfig.Interval, Options: map[string]zapcore.Level{ "foo": zapcore.DebugLevel, "bar": zapcore.InfoLevel, "buz": zapcore.PanicLevel, }, }, }, { Name: "Options with invalid level", Data: testutils.ConfigTestData{ Yaml: ` options: foo: foo`, Env: map[string]string{"OPTIONS": "foo:foo"}, }, Error: testutils.ErrorContains(`unrecognized level: "foo"`), }, } t.Run("FromEnv", func(t *testing.T) { for _, tc := range configTests { t.Run(tc.Name, tc.F(func(data testutils.ConfigTestData) (Config, error) { var actual Config err := config.FromEnv(&actual, config.EnvOptions{Environment: data.Env}) return actual, err })) } }) t.Run("FromYAMLFile", func(t *testing.T) { for _, tc := range configTests { t.Run(tc.Name+"/FromYAMLFile", tc.F(func(data testutils.ConfigTestData) (Config, error) { var actual Config var err error testutils.WithYAMLFile(t, data.Yaml, func(file *os.File) { err = config.FromYAMLFile(file.Name(), &actual) }) return actual, err })) } }) } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/logging/logger.go0000644000000000000000000000100415024302466026436 0ustar rootrootpackage 
logging import ( "go.uber.org/zap" "time" ) // Logger wraps zap.SugaredLogger and // allows to get the interval for periodic logging. type Logger struct { *zap.SugaredLogger interval time.Duration } // NewLogger returns a new Logger. func NewLogger(base *zap.SugaredLogger, interval time.Duration) *Logger { return &Logger{ SugaredLogger: base, interval: interval, } } // Interval returns the interval for periodic logging. func (l *Logger) Interval() time.Duration { return l.interval } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/logging/journald_core.go0000644000000000000000000001471615024302466030023 0ustar rootrootpackage logging import ( "fmt" "github.com/icinga/icinga-go-library/strcase" "github.com/icinga/icinga-go-library/utils" "github.com/pkg/errors" "github.com/ssgreg/journald" "go.uber.org/zap/zapcore" "strings" ) // journaldPriorities maps zapcore.Level to journal.Priority. var journaldPriorities = map[zapcore.Level]journald.Priority{ zapcore.DebugLevel: journald.PriorityDebug, zapcore.InfoLevel: journald.PriorityInfo, zapcore.WarnLevel: journald.PriorityWarning, zapcore.ErrorLevel: journald.PriorityErr, zapcore.FatalLevel: journald.PriorityCrit, zapcore.PanicLevel: journald.PriorityCrit, zapcore.DPanicLevel: journald.PriorityCrit, } // journaldVisibleFields is a set (map to struct{}) of field keys being logged within the message for journald. var journaldVisibleFields = map[string]struct{}{ "error": {}, } // NewJournaldCore returns a zapcore.Core that sends log entries to systemd-journald and // uses the given identifier as a prefix for structured logging context that is sent as journal fields. func NewJournaldCore(identifier string, enab zapcore.LevelEnabler) zapcore.Core { return &journaldCore{ LevelEnabler: enab, identifier: identifier, } } type journaldCore struct { zapcore.LevelEnabler context []zapcore.Field identifier string } func (c *journaldCore) Check(ent zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { if c.Enabled(ent.Level) { return ce.AddCore(ent, c) } return ce } func (c *journaldCore) Sync() error { return nil } func (c *journaldCore) With(fields []zapcore.Field) zapcore.Core { cc := *c cc.context = append(cc.context[:len(cc.context):len(cc.context)], fields...) return &cc } func (c *journaldCore) Write(ent zapcore.Entry, fields []zapcore.Field) error { pri, ok := journaldPriorities[ent.Level] if !ok { return errors.Errorf("unknown log level %q", ent.Level) } enc := zapcore.NewMapObjectEncoder() c.addFields(enc, fields) c.addFields(enc, c.context) enc.Fields["SYSLOG_IDENTIFIER"] = c.identifier // Re-encode keys before passing them to journald. Unfortunately, this cannot be done within addFields or at another // earlier position since zapcore's Field.AddTo may create multiple entries, some with non-compliant names. encFields := make(map[string]interface{}) for k, v := range enc.Fields { encFields[encodeJournaldFieldKey(k)] = v } message := ent.Message + visibleFieldsMsg(journaldVisibleFields, append(fields, c.context...)) if ent.LoggerName != c.identifier { message = ent.LoggerName + ": " + message } return journald.Send(message, pri, encFields) } // addFields adds all given fields to enc with an altered key, prefixed with the journaldCore.identifier. func (c *journaldCore) addFields(enc zapcore.ObjectEncoder, fields []zapcore.Field) { for _, field := range fields { field.Key = c.identifier + "_" + field.Key field.AddTo(enc) } } // encodeJournaldFieldKey alters a string to be used as a journald field key. 
// // When journald receives a field with an invalid key, it silently discards this field. This makes syntactically correct // keys a necessity. Unfortunately, there was no specific documentation about the field key syntax available. This // function follows the logic enforced in systemd's journal_field_valid function[0]. // // This boils down to: // - Key length MUST be within (0, 64] characters. // - Key MUST start with [A-Z]. // - Key characters MUST be [A-Z0-9_]. // // [0]: https://github.com/systemd/systemd/blob/11d5e2b5fbf9f6bfa5763fd45b56829ad4f0777f/src/libsystemd/sd-journal/journal-file.c#L1703 func encodeJournaldFieldKey(key string) string { if len(key) == 0 { // While this is definitely an error, panicking would be too destructive and silently dropping fields is against // the very idea of ensuring key conformity. return "EMPTY_KEY" } isAsciiUpper := func(r rune) bool { return 'A' <= r && r <= 'Z' } isAsciiDigit := func(r rune) bool { return '0' <= r && r <= '9' } keyParts := []rune(strcase.ScreamingSnake(key)) for i, r := range keyParts { if isAsciiUpper(r) || isAsciiDigit(r) || r == '_' { continue } keyParts[i] = '_' } key = string(keyParts) if !isAsciiUpper(rune(key[0])) { // Escape invalid leading characters with a generic "ESC_" prefix. This was seen as a safer choice instead of // iterating over the key and removing parts. key = "ESC_" + key } if len(key) > 64 { key = key[:64] } return key } // visibleFieldsMsg creates a string to be appended to the log message including fields to be explicitly printed. // // When logging against journald, the zapcore.Fields are used as journald fields, resulting in not being shown in the // default journalctl output (short). While this is documented in our docs, missing error messages are usually confusing // for end users. // // This method takes an allow list (set, map of keys to empty struct) of key to be displayed - there is the global // variable journaldVisibleFields; parameter for testing - and a slice of zapcore.Fields, creating an output string of // the allowed fields prefixed by a whitespace separator. If there are no fields to be logged, the returned string is // empty. So the function output can be appended to the output message without further checks. func visibleFieldsMsg(visibleFieldKeys map[string]struct{}, fields []zapcore.Field) string { if visibleFieldKeys == nil || fields == nil { return "" } enc := zapcore.NewMapObjectEncoder() for _, field := range fields { if _, shouldLog := visibleFieldKeys[field.Key]; shouldLog { field.AddTo(enc) } } // The internal zapcore.encodeError function[^0] can result in multiple fields. For example, an error type // implementing fmt.Formatter results in another "errorVerbose" field, containing the stack trace if the error was // created by github.com/pkg/errors including a stack[^1]. So the keys are checked again in the following loop. 
// // [^0]: https://github.com/uber-go/zap/blob/v1.27.0/zapcore/error.go#L47 // [^1]: https://pkg.go.dev/github.com/pkg/errors@v0.9.1#WithStack visibleFields := make([]string, 0, len(visibleFieldKeys)) for k, v := range utils.IterateOrderedMap(enc.Fields) { if _, shouldLog := visibleFieldKeys[k]; !shouldLog { continue } var encodedField string switch v.(type) { case string, []byte, error: encodedField = fmt.Sprintf("%s=%q", k, v) default: encodedField = fmt.Sprintf(`%s="%v"`, k, v) } visibleFields = append(visibleFields, encodedField) } if len(visibleFields) == 0 { return "" } return "\t" + strings.Join(visibleFields, ", ") } dependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/testutils/0000755000000000000000000000000015024302466025247 5ustar rootrootdependencies/pkg/mod/github.com/icinga/icinga-go-library@v0.7.2/testutils/testutils.go0000644000000000000000000000710015024302466027634 0ustar rootroot// Package testutils provides utilities for testing, including generic test case structures // and helper functions for error checking and temporary file handling. // // This package is designed to simplify the process of writing tests by providing reusable // components that handle common testing scenarios, such as comparing expected and actual results, // checking for specific error conditions, and managing temporary files. package testutils import ( "github.com/stretchr/testify/require" "os" "testing" ) // TestCase represents a generic test case structure. // It is parameterized by T, the type of the expected result, and D, the type of the test data. // This struct is useful for defining test cases with expected outcomes and associated data. type TestCase[T any, D any] struct { // Name is the identifier for the test case, used for reporting purposes. Name string // Expected is the anticipated result of the test. It should be left empty if an error is expected. Expected T // Data contains the input or configuration for the test case. Data D // Error is a function that checks the error returned by the test function, if an error is anticipated. Error func(*testing.T, error) } // F returns a test function that executes the logic of the test case, suitable for use with t.Run(). // It takes a function f that processes the test data and returns an actual result along with an error, if any. // After executing f, it verifies the actual result against the expected result or evaluates the error condition. func (tc TestCase[T, D]) F(f func(D) (T, error)) func(t *testing.T) { return func(t *testing.T) { actual, err := f(tc.Data) if tc.Error != nil { tc.Error(t, err) } else { require.NoError(t, err) require.Equal(t, tc.Expected, actual) } } } // ConfigTestData holds test data for loading and validating configuration from // both YAML files and environment variables. type ConfigTestData struct { // YAML file content to be tested. Yaml string // Environment variables to be used in the test. Env map[string]string } // ErrorAs returns a function that checks if the error is of a specific type T. // This is useful for verifying that an error matches a particular interface or concrete type. func ErrorAs[T error]() func(t *testing.T, err error) { return func(t *testing.T, err error) { var expected T require.ErrorAs(t, err, &expected) } } // ErrorContains returns a function that checks if the error message contains the expected substring. // This is useful for validating that an error message includes specific information. 
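//
// It is typically assigned to the Error field of a TestCase, for example
// (mirroring the logging package's config tests; Config is that package's type):
//
//	TestCase[Config, ConfigTestData]{
//		Name:  "invalid logger output",
//		Data:  ConfigTestData{Yaml: `output: invalid`},
//		Error: ErrorContains("is not a valid logger output"),
//	}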
func ErrorContains(expected string) func(t *testing.T, err error) { return func(t *testing.T, err error) { require.ErrorContains(t, err, expected) } } // ErrorIs returns a function that checks if the error is equal to the expected error. // This is useful for confirming that an error is exactly the one anticipated. func ErrorIs(expected error) func(t *testing.T, err error) { return func(t *testing.T, err error) { require.ErrorIs(t, err, expected) } } // WithYAMLFile creates a temporary YAML file with the provided content and executes a function with the file. // It ensures the file is removed after the function execution, preventing resource leaks. // This utility is helpful for tests that require file-based configuration. func WithYAMLFile(t *testing.T, yaml string, f func(file *os.File)) { file, err := os.CreateTemp("", "*.yaml") require.NoError(t, err) defer func(name string) { _ = os.Remove(name) }(file.Name()) _, err = file.WriteString(yaml) require.NoError(t, err) require.NoError(t, file.Close()) f(file) } dependencies/pkg/mod/github.com/dgryski/0000775000000000000000000000000015024302470017254 5ustar rootrootdependencies/pkg/mod/github.com/dgryski/go-rendezvous@v0.0.0-20200823014737-9f7001d12a5f/0000755000000000000000000000000015024302470026036 5ustar rootrootdependencies/pkg/mod/github.com/dgryski/go-rendezvous@v0.0.0-20200823014737-9f7001d12a5f/LICENSE0000644000000000000000000000212115024302470027037 0ustar rootrootThe MIT License (MIT) Copyright (c) 2017-2020 Damian Gryski Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
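The go-rendezvous package below (rdv.go) implements rendezvous (highest random
weight) hashing: a key is hashed against every node and the node with the highest
combined score wins, so changing the node set only remaps keys whose winning node
changed. A hedged usage sketch of the API defined in rdv.go; the node names and
the FNV-based hasher are illustrative choices, not prescribed by the package:

	package main

	import (
		"fmt"
		"hash/fnv"

		rendezvous "github.com/dgryski/go-rendezvous"
	)

	func hashString(s string) uint64 {
		h := fnv.New64a()
		h.Write([]byte(s))
		return h.Sum64()
	}

	func main() {
		r := rendezvous.New([]string{"node-a", "node-b", "node-c"}, hashString)
		fmt.Println(r.Lookup("some-key")) // deterministically picks one node for the key
		r.Add("node-d")                   // only keys that now score highest on node-d move
	}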
dependencies/pkg/mod/github.com/dgryski/go-rendezvous@v0.0.0-20200823014737-9f7001d12a5f/rdv_test.go0000644000000000000000000000035515024302470030222 0ustar rootrootpackage rendezvous import ( "hash/fnv" "testing" ) func hashString(s string) uint64 { h := fnv.New64a() h.Write([]byte(s)) return h.Sum64() } func TestEmpty(t *testing.T) { r := New([]string{}, hashString) r.Lookup("hello") } dependencies/pkg/mod/github.com/dgryski/go-rendezvous@v0.0.0-20200823014737-9f7001d12a5f/rdv.go0000644000000000000000000000261715024302470027166 0ustar rootrootpackage rendezvous type Rendezvous struct { nodes map[string]int nstr []string nhash []uint64 hash Hasher } type Hasher func(s string) uint64 func New(nodes []string, hash Hasher) *Rendezvous { r := &Rendezvous{ nodes: make(map[string]int, len(nodes)), nstr: make([]string, len(nodes)), nhash: make([]uint64, len(nodes)), hash: hash, } for i, n := range nodes { r.nodes[n] = i r.nstr[i] = n r.nhash[i] = hash(n) } return r } func (r *Rendezvous) Lookup(k string) string { // short-circuit if we're empty if len(r.nodes) == 0 { return "" } khash := r.hash(k) var midx int var mhash = xorshiftMult64(khash ^ r.nhash[0]) for i, nhash := range r.nhash[1:] { if h := xorshiftMult64(khash ^ nhash); h > mhash { midx = i + 1 mhash = h } } return r.nstr[midx] } func (r *Rendezvous) Add(node string) { r.nodes[node] = len(r.nstr) r.nstr = append(r.nstr, node) r.nhash = append(r.nhash, r.hash(node)) } func (r *Rendezvous) Remove(node string) { // find index of node to remove nidx := r.nodes[node] // remove from the slices l := len(r.nstr) r.nstr[nidx] = r.nstr[l] r.nstr = r.nstr[:l] r.nhash[nidx] = r.nhash[l] r.nhash = r.nhash[:l] // update the map delete(r.nodes, node) moved := r.nstr[nidx] r.nodes[moved] = nidx } func xorshiftMult64(x uint64) uint64 { x ^= x >> 12 // a x ^= x << 25 // b x ^= x >> 27 // c return x * 2685821657736338717 } dependencies/pkg/mod/github.com/google/0000775000000000000000000000000015024302467017062 5ustar rootrootdependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/0000755000000000000000000000000015024302467021113 5ustar rootrootdependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/LICENSE0000644000000000000000000000270715024302467022126 0ustar rootrootCopyright (c) 2017 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/go.mod0000644000000000000000000000005115024302467022215 0ustar rootrootmodule github.com/google/go-cmp go 1.21 dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/README.md0000644000000000000000000000323215024302467022372 0ustar rootroot# Package for equality of Go values [![GoDev](https://img.shields.io/static/v1?label=godev&message=reference&color=00add8)][godev] [![Build Status](https://github.com/google/go-cmp/actions/workflows/test.yml/badge.svg?branch=master)][actions] This package is intended to be a more powerful and safer alternative to `reflect.DeepEqual` for comparing whether two values are semantically equal. The primary features of `cmp` are: * When the default behavior of equality does not suit the needs of the test, custom equality functions can override the equality operation. For example, an equality function may report floats as equal so long as they are within some tolerance of each other. * Types that have an `Equal` method may use that method to determine equality. This allows package authors to determine the equality operation for the types that they define. * If no custom equality functions are used and no `Equal` method is defined, equality is determined by recursively comparing the primitive kinds on both values, much like `reflect.DeepEqual`. Unlike `reflect.DeepEqual`, unexported fields are not compared by default; they result in panics unless suppressed by using an `Ignore` option (see `cmpopts.IgnoreUnexported`) or explicitly compared using the `AllowUnexported` option. See the [documentation][godev] for more information. This is not an official Google product. [godev]: https://pkg.go.dev/github.com/google/go-cmp/cmp [actions]: https://github.com/google/go-cmp/actions ## Install ``` go get -u github.com/google/go-cmp/cmp ``` ## License BSD - See [LICENSE][license] file [license]: https://github.com/google/go-cmp/blob/master/LICENSE dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/CONTRIBUTING.md0000644000000000000000000000171115024302467023344 0ustar rootroot# How to Contribute We'd love to accept your patches and contributions to this project. There are just a few small guidelines you need to follow. ## Contributor License Agreement Contributions to this project must be accompanied by a Contributor License Agreement. You (or your employer) retain the copyright to your contribution, this simply gives us permission to use and redistribute your contributions as part of the project. Head over to to see your current agreements on file or to sign a new one. You generally only need to submit a CLA once, so if you've already submitted one (even if it was for a different project), you probably don't need to do it again. ## Code reviews All submissions, including submissions by project members, require review. We use GitHub pull requests for this purpose. Consult [GitHub Help](https://help.github.com/articles/about-pull-requests/) for more information on using pull requests. 
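The README above notes that a custom equality function may, for example, report
floats as equal within some tolerance. A hedged sketch of that pattern (the
tolerance and the sample values are illustrative only):

	package main

	import (
		"fmt"
		"math"

		"github.com/google/go-cmp/cmp"
	)

	func main() {
		approx := cmp.Comparer(func(x, y float64) bool {
			return math.Abs(x-y) < 1e-9
		})

		a, b := 0.1, 0.2
		got := []float64{a + b}
		want := []float64{0.3}

		fmt.Println(cmp.Equal(got, want))         // false: a+b is not exactly 0.3
		fmt.Println(cmp.Equal(got, want, approx)) // true: equal within the tolerance
		if diff := cmp.Diff(want, got); diff != "" {
			fmt.Printf("mismatch (-want +got):\n%s", diff)
		}
	}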
dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/.github/0000755000000000000000000000000015024302467022453 5ustar rootrootdependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/.github/workflows/0000755000000000000000000000000015024302467024510 5ustar rootrootdependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/.github/workflows/test.yml0000644000000000000000000000117615024302467026217 0ustar rootrooton: [push, pull_request] name: Test permissions: contents: read jobs: test: strategy: matrix: go-version: [1.21.x] os: [ubuntu-latest, macos-latest] runs-on: ${{ matrix.os }} steps: - name: Install Go uses: actions/setup-go@bfdd3570ce990073878bf10f6b2d79082de49492 # v2.2.0 with: go-version: ${{ matrix.go-version }} - name: Checkout code uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 - name: Test run: go test -v -race ./... - name: Format if: matrix.go-version == '1.21.x' run: diff -u <(echo -n) <(gofmt -d .) dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/0000755000000000000000000000000015024302467021672 5ustar rootrootdependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/options.go0000644000000000000000000004607515024302467023730 0ustar rootroot// Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package cmp import ( "fmt" "reflect" "regexp" "strings" "github.com/google/go-cmp/cmp/internal/function" ) // Option configures for specific behavior of [Equal] and [Diff]. In particular, // the fundamental Option functions ([Ignore], [Transformer], and [Comparer]), // configure how equality is determined. // // The fundamental options may be composed with filters ([FilterPath] and // [FilterValues]) to control the scope over which they are applied. // // The [github.com/google/go-cmp/cmp/cmpopts] package provides helper functions // for creating options that may be used with [Equal] and [Diff]. type Option interface { // filter applies all filters and returns the option that remains. // Each option may only read s.curPath and call s.callTTBFunc. // // An Options is returned only if multiple comparers or transformers // can apply simultaneously and will only contain values of those types // or sub-Options containing values of those types. filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption } // applicableOption represents the following types: // // Fundamental: ignore | validator | *comparer | *transformer // Grouping: Options type applicableOption interface { Option // apply executes the option, which may mutate s or panic. apply(s *state, vx, vy reflect.Value) } // coreOption represents the following types: // // Fundamental: ignore | validator | *comparer | *transformer // Filters: *pathFilter | *valuesFilter type coreOption interface { Option isCore() } type core struct{} func (core) isCore() {} // Options is a list of [Option] values that also satisfies the [Option] interface. // Helper comparison packages may return an Options value when packing multiple // [Option] values into a single [Option]. When this package processes an Options, // it will be implicitly expanded into a flat list. // // Applying a filter on an Options is equivalent to applying that same filter // on all individual options held within. 
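//
// For example (a hedged sketch; the tolerance comparers are illustrative), a
// helper package may bundle several options and return them as a single value:
//
//	func approxOptions() cmp.Option {
//		return cmp.Options{
//			cmp.Comparer(func(x, y float64) bool { return math.Abs(x-y) < 1e-9 }),
//			cmp.Comparer(func(x, y float32) bool { return math.Abs(float64(x-y)) < 1e-6 }),
//		}
//	}
//
//	cmp.Equal(x, y, approxOptions())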
type Options []Option func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) { for _, opt := range opts { switch opt := opt.filter(s, t, vx, vy); opt.(type) { case ignore: return ignore{} // Only ignore can short-circuit evaluation case validator: out = validator{} // Takes precedence over comparer or transformer case *comparer, *transformer, Options: switch out.(type) { case nil: out = opt case validator: // Keep validator case *comparer, *transformer, Options: out = Options{out, opt} // Conflicting comparers or transformers } } } return out } func (opts Options) apply(s *state, _, _ reflect.Value) { const warning = "ambiguous set of applicable options" const help = "consider using filters to ensure at most one Comparer or Transformer may apply" var ss []string for _, opt := range flattenOptions(nil, opts) { ss = append(ss, fmt.Sprint(opt)) } set := strings.Join(ss, "\n\t") panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help)) } func (opts Options) String() string { var ss []string for _, opt := range opts { ss = append(ss, fmt.Sprint(opt)) } return fmt.Sprintf("Options{%s}", strings.Join(ss, ", ")) } // FilterPath returns a new [Option] where opt is only evaluated if filter f // returns true for the current [Path] in the value tree. // // This filter is called even if a slice element or map entry is missing and // provides an opportunity to ignore such cases. The filter function must be // symmetric such that the filter result is identical regardless of whether the // missing value is from x or y. // // The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or // a previously filtered [Option]. func FilterPath(f func(Path) bool, opt Option) Option { if f == nil { panic("invalid path filter function") } if opt := normalizeOption(opt); opt != nil { return &pathFilter{fnc: f, opt: opt} } return nil } type pathFilter struct { core fnc func(Path) bool opt Option } func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { if f.fnc(s.curPath) { return f.opt.filter(s, t, vx, vy) } return nil } func (f pathFilter) String() string { return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt) } // FilterValues returns a new [Option] where opt is only evaluated if filter f, // which is a function of the form "func(T, T) bool", returns true for the // current pair of values being compared. If either value is invalid or // the type of the values is not assignable to T, then this filter implicitly // returns false. // // The filter function must be // symmetric (i.e., agnostic to the order of the inputs) and // deterministic (i.e., produces the same result when given the same inputs). // If T is an interface, it is possible that f is called with two values with // different concrete types that both implement T. // // The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or // a previously filtered [Option]. 
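//
// Editor's illustrative sketch (not from the original documentation): treat
// two NaN float64 values as equal while leaving every other comparison
// untouched; the cmpopts.EquateNaNs helper provides a ready-made version of
// this pattern:
//
//	opt := FilterValues(
//		func(x, y float64) bool { return math.IsNaN(x) && math.IsNaN(y) },
//		Comparer(func(x, y float64) bool { return true }),
//	)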
func FilterValues(f interface{}, opt Option) Option { v := reflect.ValueOf(f) if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() { panic(fmt.Sprintf("invalid values filter function: %T", f)) } if opt := normalizeOption(opt); opt != nil { vf := &valuesFilter{fnc: v, opt: opt} if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { vf.typ = ti } return vf } return nil } type valuesFilter struct { core typ reflect.Type // T fnc reflect.Value // func(T, T) bool opt Option } func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() { return nil } if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) { return f.opt.filter(s, t, vx, vy) } return nil } func (f valuesFilter) String() string { return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt) } // Ignore is an [Option] that causes all comparisons to be ignored. // This value is intended to be combined with [FilterPath] or [FilterValues]. // It is an error to pass an unfiltered Ignore option to [Equal]. func Ignore() Option { return ignore{} } type ignore struct{ core } func (ignore) isFiltered() bool { return false } func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} } func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) } func (ignore) String() string { return "Ignore()" } // validator is a sentinel Option type to indicate that some options could not // be evaluated due to unexported fields, missing slice elements, or // missing map entries. Both values are validator only for unexported fields. type validator struct{ core } func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption { if !vx.IsValid() || !vy.IsValid() { return validator{} } if !vx.CanInterface() || !vy.CanInterface() { return validator{} } return nil } func (validator) apply(s *state, vx, vy reflect.Value) { // Implies missing slice element or map entry. if !vx.IsValid() || !vy.IsValid() { s.report(vx.IsValid() == vy.IsValid(), 0) return } // Unable to Interface implies unexported field without visibility access. if !vx.CanInterface() || !vy.CanInterface() { help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported" var name string if t := s.curPath.Index(-2).Type(); t.Name() != "" { // Named type with unexported fields. name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType isProtoMessage := func(t reflect.Type) bool { m, ok := reflect.PointerTo(t).MethodByName("ProtoReflect") return ok && m.Type.NumIn() == 1 && m.Type.NumOut() == 1 && m.Type.Out(0).PkgPath() == "google.golang.org/protobuf/reflect/protoreflect" && m.Type.Out(0).Name() == "Message" } if isProtoMessage(t) { help = `consider using "google.golang.org/protobuf/testing/protocmp".Transform to compare proto.Message types` } else if _, ok := reflect.New(t).Interface().(error); ok { help = "consider using cmpopts.EquateErrors to compare error values" } else if t.Comparable() { help = "consider using cmpopts.EquateComparable to compare comparable Go types" } } else { // Unnamed type with unexported fields. Derive PkgPath from field. 
var pkgPath string for i := 0; i < t.NumField() && pkgPath == ""; i++ { pkgPath = t.Field(i).PkgPath } name = fmt.Sprintf("%q.(%v)", pkgPath, t.String()) // e.g., "path/to/package".(struct { a int }) } panic(fmt.Sprintf("cannot handle unexported field at %#v:\n\t%v\n%s", s.curPath, name, help)) } panic("not reachable") } // identRx represents a valid identifier according to the Go specification. const identRx = `[_\p{L}][_\p{L}\p{N}]*` var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`) // Transformer returns an [Option] that applies a transformation function that // converts values of a certain type into that of another. // // The transformer f must be a function "func(T) R" that converts values of // type T to those of type R and is implicitly filtered to input values // assignable to T. The transformer must not mutate T in any way. // // To help prevent some cases of infinite recursive cycles applying the // same transform to the output of itself (e.g., in the case where the // input and output types are the same), an implicit filter is added such that // a transformer is applicable only if that exact transformer is not already // in the tail of the [Path] since the last non-[Transform] step. // For situations where the implicit filter is still insufficient, // consider using [github.com/google/go-cmp/cmp/cmpopts.AcyclicTransformer], // which adds a filter to prevent the transformer from // being recursively applied upon itself. // // The name is a user provided label that is used as the [Transform.Name] in the // transformation [PathStep] (and eventually shown in the [Diff] output). // The name must be a valid identifier or qualified identifier in Go syntax. // If empty, an arbitrary name is used. func Transformer(name string, f interface{}) Option { v := reflect.ValueOf(f) if !function.IsType(v.Type(), function.Transformer) || v.IsNil() { panic(fmt.Sprintf("invalid transformer function: %T", f)) } if name == "" { name = function.NameOf(v) if !identsRx.MatchString(name) { name = "λ" // Lambda-symbol as placeholder name } } else if !identsRx.MatchString(name) { panic(fmt.Sprintf("invalid name: %q", name)) } tr := &transformer{name: name, fnc: reflect.ValueOf(f)} if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { tr.typ = ti } return tr } type transformer struct { core name string typ reflect.Type // T fnc reflect.Value // func(T) R } func (tr *transformer) isFiltered() bool { return tr.typ != nil } func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption { for i := len(s.curPath) - 1; i >= 0; i-- { if t, ok := s.curPath[i].(Transform); !ok { break // Hit most recent non-Transform step } else if tr == t.trans { return nil // Cannot directly use same Transform } } if tr.typ == nil || t.AssignableTo(tr.typ) { return tr } return nil } func (tr *transformer) apply(s *state, vx, vy reflect.Value) { step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}} vvx := s.callTRFunc(tr.fnc, vx, step) vvy := s.callTRFunc(tr.fnc, vy, step) step.vx, step.vy = vvx, vvy s.compareAny(step) } func (tr transformer) String() string { return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc)) } // Comparer returns an [Option] that determines whether two values are equal // to each other. // // The comparer f must be a function "func(T, T) bool" and is implicitly // filtered to input values assignable to T. 
If T is an interface, it is // possible that f is called with two values of different concrete types that // both implement T. // // The equality function must be: // - Symmetric: equal(x, y) == equal(y, x) // - Deterministic: equal(x, y) == equal(x, y) // - Pure: equal(x, y) does not modify x or y func Comparer(f interface{}) Option { v := reflect.ValueOf(f) if !function.IsType(v.Type(), function.Equal) || v.IsNil() { panic(fmt.Sprintf("invalid comparer function: %T", f)) } cm := &comparer{fnc: v} if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { cm.typ = ti } return cm } type comparer struct { core typ reflect.Type // T fnc reflect.Value // func(T, T) bool } func (cm *comparer) isFiltered() bool { return cm.typ != nil } func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption { if cm.typ == nil || t.AssignableTo(cm.typ) { return cm } return nil } func (cm *comparer) apply(s *state, vx, vy reflect.Value) { eq := s.callTTBFunc(cm.fnc, vx, vy) s.report(eq, reportByFunc) } func (cm comparer) String() string { return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc)) } // Exporter returns an [Option] that specifies whether [Equal] is allowed to // introspect into the unexported fields of certain struct types. // // Users of this option must understand that comparing on unexported fields // from external packages is not safe since changes in the internal // implementation of some external package may cause the result of [Equal] // to unexpectedly change. However, it may be valid to use this option on types // defined in an internal package where the semantic meaning of an unexported // field is in the control of the user. // // In many cases, a custom [Comparer] should be used instead that defines // equality as a function of the public API of a type rather than the underlying // unexported implementation. // // For example, the [reflect.Type] documentation defines equality to be determined // by the == operator on the interface (essentially performing a shallow pointer // comparison) and most attempts to compare *[regexp.Regexp] types are interested // in only checking that the regular expression strings are equal. // Both of these are accomplished using [Comparer] options: // // Comparer(func(x, y reflect.Type) bool { return x == y }) // Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() }) // // In other cases, the [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported] // option can be used to ignore all unexported fields on specified struct types. func Exporter(f func(reflect.Type) bool) Option { return exporter(f) } type exporter func(reflect.Type) bool func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { panic("not implemented") } // AllowUnexported returns an [Option] that allows [Equal] to forcibly introspect // unexported fields of the specified struct types. // // See [Exporter] for the proper use of this option. func AllowUnexported(types ...interface{}) Option { m := make(map[reflect.Type]bool) for _, typ := range types { t := reflect.TypeOf(typ) if t.Kind() != reflect.Struct { panic(fmt.Sprintf("invalid struct type: %T", typ)) } m[t] = true } return exporter(func(t reflect.Type) bool { return m[t] }) } // Result represents the comparison result for a single node and // is provided by cmp when calling Report (see [Reporter]). 
type Result struct { _ [0]func() // Make Result incomparable flags resultFlags } // Equal reports whether the node was determined to be equal or not. // As a special case, ignored nodes are considered equal. func (r Result) Equal() bool { return r.flags&(reportEqual|reportByIgnore) != 0 } // ByIgnore reports whether the node is equal because it was ignored. // This never reports true if [Result.Equal] reports false. func (r Result) ByIgnore() bool { return r.flags&reportByIgnore != 0 } // ByMethod reports whether the Equal method determined equality. func (r Result) ByMethod() bool { return r.flags&reportByMethod != 0 } // ByFunc reports whether a [Comparer] function determined equality. func (r Result) ByFunc() bool { return r.flags&reportByFunc != 0 } // ByCycle reports whether a reference cycle was detected. func (r Result) ByCycle() bool { return r.flags&reportByCycle != 0 } type resultFlags uint const ( _ resultFlags = (1 << iota) / 2 reportEqual reportUnequal reportByIgnore reportByMethod reportByFunc reportByCycle ) // Reporter is an [Option] that can be passed to [Equal]. When [Equal] traverses // the value trees, it calls PushStep as it descends into each node in the // tree and PopStep as it ascend out of the node. The leaves of the tree are // either compared (determined to be equal or not equal) or ignored and reported // as such by calling the Report method. func Reporter(r interface { // PushStep is called when a tree-traversal operation is performed. // The PathStep itself is only valid until the step is popped. // The PathStep.Values are valid for the duration of the entire traversal // and must not be mutated. // // Equal always calls PushStep at the start to provide an operation-less // PathStep used to report the root values. // // Within a slice, the exact set of inserted, removed, or modified elements // is unspecified and may change in future implementations. // The entries of a map are iterated through in an unspecified order. PushStep(PathStep) // Report is called exactly once on leaf nodes to report whether the // comparison identified the node as equal, unequal, or ignored. // A leaf node is one that is immediately preceded by and followed by // a pair of PushStep and PopStep calls. Report(Result) // PopStep ascends back up the value tree. // There is always a matching pop call for every push call. PopStep() }) Option { return reporter{r} } type reporter struct{ reporterIface } type reporterIface interface { PushStep(PathStep) Report(Result) PopStep() } func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { panic("not implemented") } // normalizeOption normalizes the input options such that all Options groups // are flattened and groups with a single element are reduced to that element. // Only coreOptions and Options containing coreOptions are allowed. func normalizeOption(src Option) Option { switch opts := flattenOptions(nil, Options{src}); len(opts) { case 0: return nil case 1: return opts[0] default: return opts } } // flattenOptions copies all options in src to dst as a flat list. // Only coreOptions and Options containing coreOptions are allowed. 
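//
// Editor's note (illustrative, with hypothetical coreOption values a, b, c):
// flattenOptions(nil, Options{Options{a, nil, b}, c}) returns Options{a, b, c};
// nil entries are dropped and nested Options groups are expanded in place.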
func flattenOptions(dst, src Options) Options { for _, opt := range src { switch opt := opt.(type) { case nil: continue case Options: dst = flattenOptions(dst, opt) case coreOption: dst = append(dst, opt) default: panic(fmt.Sprintf("invalid option type: %T", opt)) } } return dst } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/report_compare.go0000644000000000000000000003166415024302467025254 0ustar rootroot// Copyright 2019, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package cmp import ( "fmt" "reflect" ) // numContextRecords is the number of surrounding equal records to print. const numContextRecords = 2 type diffMode byte const ( diffUnknown diffMode = 0 diffIdentical diffMode = ' ' diffRemoved diffMode = '-' diffInserted diffMode = '+' ) type typeMode int const ( // emitType always prints the type. emitType typeMode = iota // elideType never prints the type. elideType // autoType prints the type only for composite kinds // (i.e., structs, slices, arrays, and maps). autoType ) type formatOptions struct { // DiffMode controls the output mode of FormatDiff. // // If diffUnknown, then produce a diff of the x and y values. // If diffIdentical, then emit values as if they were equal. // If diffRemoved, then only emit x values (ignoring y values). // If diffInserted, then only emit y values (ignoring x values). DiffMode diffMode // TypeMode controls whether to print the type for the current node. // // As a general rule of thumb, we always print the type of the next node // after an interface, and always elide the type of the next node after // a slice or map node. TypeMode typeMode // formatValueOptions are options specific to printing reflect.Values. formatValueOptions } func (opts formatOptions) WithDiffMode(d diffMode) formatOptions { opts.DiffMode = d return opts } func (opts formatOptions) WithTypeMode(t typeMode) formatOptions { opts.TypeMode = t return opts } func (opts formatOptions) WithVerbosity(level int) formatOptions { opts.VerbosityLevel = level opts.LimitVerbosity = true return opts } func (opts formatOptions) verbosity() uint { switch { case opts.VerbosityLevel < 0: return 0 case opts.VerbosityLevel > 16: return 16 // some reasonable maximum to avoid shift overflow default: return uint(opts.VerbosityLevel) } } const maxVerbosityPreset = 6 // verbosityPreset modifies the verbosity settings given an index // between 0 and maxVerbosityPreset, inclusive. func verbosityPreset(opts formatOptions, i int) formatOptions { opts.VerbosityLevel = int(opts.verbosity()) + 2*i if i > 0 { opts.AvoidStringer = true } if i >= maxVerbosityPreset { opts.PrintAddresses = true opts.QualifiedNames = true } return opts } // FormatDiff converts a valueNode tree into a textNode tree, where the later // is a textual representation of the differences detected in the former. func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) { if opts.DiffMode == diffIdentical { opts = opts.WithVerbosity(1) } else if opts.verbosity() < 3 { opts = opts.WithVerbosity(3) } // Check whether we have specialized formatting for this node. // This is not necessary, but helpful for producing more readable outputs. if opts.CanFormatDiffSlice(v) { return opts.FormatDiffSlice(v) } var parentKind reflect.Kind if v.parent != nil && v.parent.TransformerName == "" { parentKind = v.parent.Type.Kind() } // For leaf nodes, format the value based on the reflect.Values alone. 
// As a special case, treat equal []byte as a leaf nodes. isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == byteType isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0 if v.MaxDepth == 0 || isEqualBytes { switch opts.DiffMode { case diffUnknown, diffIdentical: // Format Equal. if v.NumDiff == 0 { outx := opts.FormatValue(v.ValueX, parentKind, ptrs) outy := opts.FormatValue(v.ValueY, parentKind, ptrs) if v.NumIgnored > 0 && v.NumSame == 0 { return textEllipsis } else if outx.Len() < outy.Len() { return outx } else { return outy } } // Format unequal. assert(opts.DiffMode == diffUnknown) var list textList outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, parentKind, ptrs) outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, parentKind, ptrs) for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ { opts2 := verbosityPreset(opts, i).WithTypeMode(elideType) outx = opts2.FormatValue(v.ValueX, parentKind, ptrs) outy = opts2.FormatValue(v.ValueY, parentKind, ptrs) } if outx != nil { list = append(list, textRecord{Diff: '-', Value: outx}) } if outy != nil { list = append(list, textRecord{Diff: '+', Value: outy}) } return opts.WithTypeMode(emitType).FormatType(v.Type, list) case diffRemoved: return opts.FormatValue(v.ValueX, parentKind, ptrs) case diffInserted: return opts.FormatValue(v.ValueY, parentKind, ptrs) default: panic("invalid diff mode") } } // Register slice element to support cycle detection. if parentKind == reflect.Slice { ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, true) defer ptrs.Pop() defer func() { out = wrapTrunkReferences(ptrRefs, out) }() } // Descend into the child value node. if v.TransformerName != "" { out := opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs) out = &textWrap{Prefix: "Inverse(" + v.TransformerName + ", ", Value: out, Suffix: ")"} return opts.FormatType(v.Type, out) } else { switch k := v.Type.Kind(); k { case reflect.Struct, reflect.Array, reflect.Slice: out = opts.formatDiffList(v.Records, k, ptrs) out = opts.FormatType(v.Type, out) case reflect.Map: // Register map to support cycle detection. ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false) defer ptrs.Pop() out = opts.formatDiffList(v.Records, k, ptrs) out = wrapTrunkReferences(ptrRefs, out) out = opts.FormatType(v.Type, out) case reflect.Ptr: // Register pointer to support cycle detection. ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false) defer ptrs.Pop() out = opts.FormatDiff(v.Value, ptrs) out = wrapTrunkReferences(ptrRefs, out) out = &textWrap{Prefix: "&", Value: out} case reflect.Interface: out = opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs) default: panic(fmt.Sprintf("%v cannot have children", k)) } return out } } func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, ptrs *pointerReferences) textNode { // Derive record name based on the data structure kind. 
var name string var formatKey func(reflect.Value) string switch k { case reflect.Struct: name = "field" opts = opts.WithTypeMode(autoType) formatKey = func(v reflect.Value) string { return v.String() } case reflect.Slice, reflect.Array: name = "element" opts = opts.WithTypeMode(elideType) formatKey = func(reflect.Value) string { return "" } case reflect.Map: name = "entry" opts = opts.WithTypeMode(elideType) formatKey = func(v reflect.Value) string { return formatMapKey(v, false, ptrs) } } maxLen := -1 if opts.LimitVerbosity { if opts.DiffMode == diffIdentical { maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... } else { maxLen = (1 << opts.verbosity()) << 1 // 2, 4, 8, 16, 32, 64, etc... } opts.VerbosityLevel-- } // Handle unification. switch opts.DiffMode { case diffIdentical, diffRemoved, diffInserted: var list textList var deferredEllipsis bool // Add final "..." to indicate records were dropped for _, r := range recs { if len(list) == maxLen { deferredEllipsis = true break } // Elide struct fields that are zero value. if k == reflect.Struct { var isZero bool switch opts.DiffMode { case diffIdentical: isZero = r.Value.ValueX.IsZero() || r.Value.ValueY.IsZero() case diffRemoved: isZero = r.Value.ValueX.IsZero() case diffInserted: isZero = r.Value.ValueY.IsZero() } if isZero { continue } } // Elide ignored nodes. if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 { deferredEllipsis = !(k == reflect.Slice || k == reflect.Array) if !deferredEllipsis { list.AppendEllipsis(diffStats{}) } continue } if out := opts.FormatDiff(r.Value, ptrs); out != nil { list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) } } if deferredEllipsis { list.AppendEllipsis(diffStats{}) } return &textWrap{Prefix: "{", Value: list, Suffix: "}"} case diffUnknown: default: panic("invalid diff mode") } // Handle differencing. var numDiffs int var list textList var keys []reflect.Value // invariant: len(list) == len(keys) groups := coalesceAdjacentRecords(name, recs) maxGroup := diffStats{Name: name} for i, ds := range groups { if maxLen >= 0 && numDiffs >= maxLen { maxGroup = maxGroup.Append(ds) continue } // Handle equal records. if ds.NumDiff() == 0 { // Compute the number of leading and trailing records to print. var numLo, numHi int numEqual := ds.NumIgnored + ds.NumIdentical for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 { if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { break } numLo++ } for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { break } numHi++ } if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 { numHi++ // Avoid pointless coalescing of a single equal record } // Format the equal values. for _, r := range recs[:numLo] { out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs) list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) keys = append(keys, r.Key) } if numEqual > numLo+numHi { ds.NumIdentical -= numLo + numHi list.AppendEllipsis(ds) for len(keys) < len(list) { keys = append(keys, reflect.Value{}) } } for _, r := range recs[numEqual-numHi : numEqual] { out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs) list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) keys = append(keys, r.Key) } recs = recs[numEqual:] continue } // Handle unequal records. 
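	// Editor's note: each unequal record below is rendered in one of three
	// ways: as a specialized slice diff, as separate removed ('-') and
	// inserted ('+') entries (raising the verbosity preset until the two
	// renderings actually differ), or by recursively formatting the record
	// as a single mixed diff.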
for _, r := range recs[:ds.NumDiff()] { switch { case opts.CanFormatDiffSlice(r.Value): out := opts.FormatDiffSlice(r.Value) list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) keys = append(keys, r.Key) case r.Value.NumChildren == r.Value.MaxDepth: outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs) outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs) for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ { opts2 := verbosityPreset(opts, i) outx = opts2.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs) outy = opts2.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs) } if outx != nil { list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx}) keys = append(keys, r.Key) } if outy != nil { list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy}) keys = append(keys, r.Key) } default: out := opts.FormatDiff(r.Value, ptrs) list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) keys = append(keys, r.Key) } } recs = recs[ds.NumDiff():] numDiffs += ds.NumDiff() } if maxGroup.IsZero() { assert(len(recs) == 0) } else { list.AppendEllipsis(maxGroup) for len(keys) < len(list) { keys = append(keys, reflect.Value{}) } } assert(len(list) == len(keys)) // For maps, the default formatting logic uses fmt.Stringer which may // produce ambiguous output. Avoid calling String to disambiguate. if k == reflect.Map { var ambiguous bool seenKeys := map[string]reflect.Value{} for i, currKey := range keys { if currKey.IsValid() { strKey := list[i].Key prevKey, seen := seenKeys[strKey] if seen && prevKey.CanInterface() && currKey.CanInterface() { ambiguous = prevKey.Interface() != currKey.Interface() if ambiguous { break } } seenKeys[strKey] = currKey } } if ambiguous { for i, k := range keys { if k.IsValid() { list[i].Key = formatMapKey(k, true, ptrs) } } } } return &textWrap{Prefix: "{", Value: list, Suffix: "}"} } // coalesceAdjacentRecords coalesces the list of records into groups of // adjacent equal, or unequal counts. func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) { var prevCase int // Arbitrary index into which case last occurred lastStats := func(i int) *diffStats { if prevCase != i { groups = append(groups, diffStats{Name: name}) prevCase = i } return &groups[len(groups)-1] } for _, r := range recs { switch rv := r.Value; { case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0: lastStats(1).NumIgnored++ case rv.NumDiff == 0: lastStats(1).NumIdentical++ case rv.NumDiff > 0 && !rv.ValueY.IsValid(): lastStats(2).NumRemoved++ case rv.NumDiff > 0 && !rv.ValueX.IsValid(): lastStats(2).NumInserted++ default: lastStats(2).NumModified++ } } return groups } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/report.go0000644000000000000000000000300715024302467023534 0ustar rootroot// Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package cmp // defaultReporter implements the reporter interface. // // As Equal serially calls the PushStep, Report, and PopStep methods, the // defaultReporter constructs a tree-based representation of the compared value // and the result of each comparison (see valueNode). // // When the String method is called, the FormatDiff method transforms the // valueNode tree into a textNode tree, which is a tree-based representation // of the textual output (see textNode). 
// // Lastly, the textNode.String method produces the final report as a string. type defaultReporter struct { root *valueNode curr *valueNode } func (r *defaultReporter) PushStep(ps PathStep) { r.curr = r.curr.PushStep(ps) if r.root == nil { r.root = r.curr } } func (r *defaultReporter) Report(rs Result) { r.curr.Report(rs) } func (r *defaultReporter) PopStep() { r.curr = r.curr.PopStep() } // String provides a full report of the differences detected as a structured // literal in pseudo-Go syntax. String may only be called after the entire tree // has been traversed. func (r *defaultReporter) String() string { assert(r.root != nil && r.curr == nil) if r.root.NumDiff == 0 { return "" } ptrs := new(pointerReferences) text := formatOptions{}.FormatDiff(r.root, ptrs) resolveReferences(text) return text.String() } func assert(ok bool) { if !ok { panic("assertion failure") } } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/compare.go0000644000000000000000000005455115024302467023661 0ustar rootroot// Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package cmp determines equality of values. // // This package is intended to be a more powerful and safer alternative to // [reflect.DeepEqual] for comparing whether two values are semantically equal. // It is intended to only be used in tests, as performance is not a goal and // it may panic if it cannot compare the values. Its propensity towards // panicking means that its unsuitable for production environments where a // spurious panic may be fatal. // // The primary features of cmp are: // // - When the default behavior of equality does not suit the test's needs, // custom equality functions can override the equality operation. // For example, an equality function may report floats as equal so long as // they are within some tolerance of each other. // // - Types with an Equal method (e.g., [time.Time.Equal]) may use that method // to determine equality. This allows package authors to determine // the equality operation for the types that they define. // // - If no custom equality functions are used and no Equal method is defined, // equality is determined by recursively comparing the primitive kinds on // both values, much like [reflect.DeepEqual]. Unlike [reflect.DeepEqual], // unexported fields are not compared by default; they result in panics // unless suppressed by using an [Ignore] option // (see [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]) // or explicitly compared using the [Exporter] option. package cmp import ( "fmt" "reflect" "strings" "github.com/google/go-cmp/cmp/internal/diff" "github.com/google/go-cmp/cmp/internal/function" "github.com/google/go-cmp/cmp/internal/value" ) // TODO(≥go1.18): Use any instead of interface{}. // Equal reports whether x and y are equal by recursively applying the // following rules in the given order to x and y and all of their sub-values: // // - Let S be the set of all [Ignore], [Transformer], and [Comparer] options that // remain after applying all path filters, value filters, and type filters. // If at least one [Ignore] exists in S, then the comparison is ignored. // If the number of [Transformer] and [Comparer] options in S is non-zero, // then Equal panics because it is ambiguous which option to use. // If S contains a single [Transformer], then use that to transform // the current values and recursively call Equal on the output values. 
// If S contains a single [Comparer], then use that to compare the current values. // Otherwise, evaluation proceeds to the next rule. // // - If the values have an Equal method of the form "(T) Equal(T) bool" or // "(T) Equal(I) bool" where T is assignable to I, then use the result of // x.Equal(y) even if x or y is nil. Otherwise, no such method exists and // evaluation proceeds to the next rule. // // - Lastly, try to compare x and y based on their basic kinds. // Simple kinds like booleans, integers, floats, complex numbers, strings, // and channels are compared using the equivalent of the == operator in Go. // Functions are only equal if they are both nil, otherwise they are unequal. // // Structs are equal if recursively calling Equal on all fields report equal. // If a struct contains unexported fields, Equal panics unless an [Ignore] option // (e.g., [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]) ignores that field // or the [Exporter] option explicitly permits comparing the unexported field. // // Slices are equal if they are both nil or both non-nil, where recursively // calling Equal on all non-ignored slice or array elements report equal. // Empty non-nil slices and nil slices are not equal; to equate empty slices, // consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty]. // // Maps are equal if they are both nil or both non-nil, where recursively // calling Equal on all non-ignored map entries report equal. // Map keys are equal according to the == operator. // To use custom comparisons for map keys, consider using // [github.com/google/go-cmp/cmp/cmpopts.SortMaps]. // Empty non-nil maps and nil maps are not equal; to equate empty maps, // consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty]. // // Pointers and interfaces are equal if they are both nil or both non-nil, // where they have the same underlying concrete type and recursively // calling Equal on the underlying values reports equal. // // Before recursing into a pointer, slice element, or map, the current path // is checked to detect whether the address has already been visited. // If there is a cycle, then the pointed at values are considered equal // only if both addresses were previously visited in the same path step. func Equal(x, y interface{}, opts ...Option) bool { s := newState(opts) s.compareAny(rootStep(x, y)) return s.result.Equal() } // Diff returns a human-readable report of the differences between two values: // y - x. It returns an empty string if and only if Equal returns true for the // same input values and options. // // The output is displayed as a literal in pseudo-Go syntax. // At the start of each line, a "-" prefix indicates an element removed from x, // a "+" prefix to indicates an element added from y, and the lack of a prefix // indicates an element common to both x and y. If possible, the output // uses fmt.Stringer.String or error.Error methods to produce more humanly // readable outputs. In such cases, the string is prefixed with either an // 's' or 'e' character, respectively, to indicate that the method was called. // // Do not depend on this output being stable. If you need the ability to // programmatically interpret the difference, consider using a custom Reporter. func Diff(x, y interface{}, opts ...Option) string { s := newState(opts) // Optimization: If there are no other reporters, we can optimize for the // common case where the result is equal (and thus no reported difference). // This avoids the expensive construction of a difference tree. 
if len(s.reporters) == 0 { s.compareAny(rootStep(x, y)) if s.result.Equal() { return "" } s.result = diff.Result{} // Reset results } r := new(defaultReporter) s.reporters = append(s.reporters, reporter{r}) s.compareAny(rootStep(x, y)) d := r.String() if (d == "") != s.result.Equal() { panic("inconsistent difference and equality results") } return d } // rootStep constructs the first path step. If x and y have differing types, // then they are stored within an empty interface type. func rootStep(x, y interface{}) PathStep { vx := reflect.ValueOf(x) vy := reflect.ValueOf(y) // If the inputs are different types, auto-wrap them in an empty interface // so that they have the same parent type. var t reflect.Type if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() { t = anyType if vx.IsValid() { vvx := reflect.New(t).Elem() vvx.Set(vx) vx = vvx } if vy.IsValid() { vvy := reflect.New(t).Elem() vvy.Set(vy) vy = vvy } } else { t = vx.Type() } return &pathStep{t, vx, vy} } type state struct { // These fields represent the "comparison state". // Calling statelessCompare must not result in observable changes to these. result diff.Result // The current result of comparison curPath Path // The current path in the value tree curPtrs pointerPath // The current set of visited pointers reporters []reporter // Optional reporters // recChecker checks for infinite cycles applying the same set of // transformers upon the output of itself. recChecker recChecker // dynChecker triggers pseudo-random checks for option correctness. // It is safe for statelessCompare to mutate this value. dynChecker dynChecker // These fields, once set by processOption, will not change. exporters []exporter // List of exporters for structs with unexported fields opts Options // List of all fundamental and filter options } func newState(opts []Option) *state { // Always ensure a validator option exists to validate the inputs. s := &state{opts: Options{validator{}}} s.curPtrs.Init() s.processOption(Options(opts)) return s } func (s *state) processOption(opt Option) { switch opt := opt.(type) { case nil: case Options: for _, o := range opt { s.processOption(o) } case coreOption: type filtered interface { isFiltered() bool } if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() { panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt)) } s.opts = append(s.opts, opt) case exporter: s.exporters = append(s.exporters, opt) case reporter: s.reporters = append(s.reporters, opt) default: panic(fmt.Sprintf("unknown option %T", opt)) } } // statelessCompare compares two values and returns the result. // This function is stateless in that it does not alter the current result, // or output to any registered reporters. func (s *state) statelessCompare(step PathStep) diff.Result { // We do not save and restore curPath and curPtrs because all of the // compareX methods should properly push and pop from them. // It is an implementation bug if the contents of the paths differ from // when calling this function to when returning from it. oldResult, oldReporters := s.result, s.reporters s.result = diff.Result{} // Reset result s.reporters = nil // Remove reporters to avoid spurious printouts s.compareAny(step) res := s.result s.result, s.reporters = oldResult, oldReporters return res } func (s *state) compareAny(step PathStep) { // Update the path stack. 
s.curPath.push(step) defer s.curPath.pop() for _, r := range s.reporters { r.PushStep(step) defer r.PopStep() } s.recChecker.Check(s.curPath) // Cycle-detection for slice elements (see NOTE in compareSlice). t := step.Type() vx, vy := step.Values() if si, ok := step.(SliceIndex); ok && si.isSlice && vx.IsValid() && vy.IsValid() { px, py := vx.Addr(), vy.Addr() if eq, visited := s.curPtrs.Push(px, py); visited { s.report(eq, reportByCycle) return } defer s.curPtrs.Pop(px, py) } // Rule 1: Check whether an option applies on this node in the value tree. if s.tryOptions(t, vx, vy) { return } // Rule 2: Check whether the type has a valid Equal method. if s.tryMethod(t, vx, vy) { return } // Rule 3: Compare based on the underlying kind. switch t.Kind() { case reflect.Bool: s.report(vx.Bool() == vy.Bool(), 0) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: s.report(vx.Int() == vy.Int(), 0) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: s.report(vx.Uint() == vy.Uint(), 0) case reflect.Float32, reflect.Float64: s.report(vx.Float() == vy.Float(), 0) case reflect.Complex64, reflect.Complex128: s.report(vx.Complex() == vy.Complex(), 0) case reflect.String: s.report(vx.String() == vy.String(), 0) case reflect.Chan, reflect.UnsafePointer: s.report(vx.Pointer() == vy.Pointer(), 0) case reflect.Func: s.report(vx.IsNil() && vy.IsNil(), 0) case reflect.Struct: s.compareStruct(t, vx, vy) case reflect.Slice, reflect.Array: s.compareSlice(t, vx, vy) case reflect.Map: s.compareMap(t, vx, vy) case reflect.Ptr: s.comparePtr(t, vx, vy) case reflect.Interface: s.compareInterface(t, vx, vy) default: panic(fmt.Sprintf("%v kind not handled", t.Kind())) } } func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool { // Evaluate all filters and apply the remaining options. if opt := s.opts.filter(s, t, vx, vy); opt != nil { opt.apply(s, vx, vy) return true } return false } func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool { // Check if this type even has an Equal method. m, ok := t.MethodByName("Equal") if !ok || !function.IsType(m.Type, function.EqualAssignable) { return false } eq := s.callTTBFunc(m.Func, vx, vy) s.report(eq, reportByMethod) return true } func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { if !s.dynChecker.Next() { return f.Call([]reflect.Value{v})[0] } // Run the function twice and ensure that we get the same results back. // We run in goroutines so that the race detector (if enabled) can detect // unsafe mutations to the input. c := make(chan reflect.Value) go detectRaces(c, f, v) got := <-c want := f.Call([]reflect.Value{v})[0] if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() { // To avoid false-positives with non-reflexive equality operations, // we sanity check whether a value is equal to itself. if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() { return want } panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f))) } return want } func (s *state) callTTBFunc(f, x, y reflect.Value) bool { if !s.dynChecker.Next() { return f.Call([]reflect.Value{x, y})[0].Bool() } // Swapping the input arguments is sufficient to check that // f is symmetric and deterministic. // We run in goroutines so that the race detector (if enabled) can detect // unsafe mutations to the input. 
c := make(chan reflect.Value) go detectRaces(c, f, y, x) got := <-c want := f.Call([]reflect.Value{x, y})[0].Bool() if !got.IsValid() || got.Bool() != want { panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f))) } return want } func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { var ret reflect.Value defer func() { recover() // Ignore panics, let the other call to f panic instead c <- ret }() ret = f.Call(vs)[0] } func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { var addr bool var vax, vay reflect.Value // Addressable versions of vx and vy var mayForce, mayForceInit bool step := StructField{&structField{}} for i := 0; i < t.NumField(); i++ { step.typ = t.Field(i).Type step.vx = vx.Field(i) step.vy = vy.Field(i) step.name = t.Field(i).Name step.idx = i step.unexported = !isExported(step.name) if step.unexported { if step.name == "_" { continue } // Defer checking of unexported fields until later to give an // Ignore a chance to ignore the field. if !vax.IsValid() || !vay.IsValid() { // For retrieveUnexportedField to work, the parent struct must // be addressable. Create a new copy of the values if // necessary to make them addressable. addr = vx.CanAddr() || vy.CanAddr() vax = makeAddressable(vx) vay = makeAddressable(vy) } if !mayForceInit { for _, xf := range s.exporters { mayForce = mayForce || xf(t) } mayForceInit = true } step.mayForce = mayForce step.paddr = addr step.pvx = vax step.pvy = vay step.field = t.Field(i) } s.compareAny(step) } } func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) { isSlice := t.Kind() == reflect.Slice if isSlice && (vx.IsNil() || vy.IsNil()) { s.report(vx.IsNil() && vy.IsNil(), 0) return } // NOTE: It is incorrect to call curPtrs.Push on the slice header pointer // since slices represents a list of pointers, rather than a single pointer. // The pointer checking logic must be handled on a per-element basis // in compareAny. // // A slice header (see reflect.SliceHeader) in Go is a tuple of a starting // pointer P, a length N, and a capacity C. Supposing each slice element has // a memory size of M, then the slice is equivalent to the list of pointers: // [P+i*M for i in range(N)] // // For example, v[:0] and v[:1] are slices with the same starting pointer, // but they are clearly different values. Using the slice pointer alone // violates the assumption that equal pointers implies equal values. step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}, isSlice: isSlice}} withIndexes := func(ix, iy int) SliceIndex { if ix >= 0 { step.vx, step.xkey = vx.Index(ix), ix } else { step.vx, step.xkey = reflect.Value{}, -1 } if iy >= 0 { step.vy, step.ykey = vy.Index(iy), iy } else { step.vy, step.ykey = reflect.Value{}, -1 } return step } // Ignore options are able to ignore missing elements in a slice. // However, detecting these reliably requires an optimal differencing // algorithm, for which diff.Difference is not. // // Instead, we first iterate through both slices to detect which elements // would be ignored if standing alone. The index of non-discarded elements // are stored in a separate slice, which diffing is then performed on. 
var indexesX, indexesY []int var ignoredX, ignoredY []bool for ix := 0; ix < vx.Len(); ix++ { ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0 if !ignored { indexesX = append(indexesX, ix) } ignoredX = append(ignoredX, ignored) } for iy := 0; iy < vy.Len(); iy++ { ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0 if !ignored { indexesY = append(indexesY, iy) } ignoredY = append(ignoredY, ignored) } // Compute an edit-script for slices vx and vy (excluding ignored elements). edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result { return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy])) }) // Replay the ignore-scripts and the edit-script. var ix, iy int for ix < vx.Len() || iy < vy.Len() { var e diff.EditType switch { case ix < len(ignoredX) && ignoredX[ix]: e = diff.UniqueX case iy < len(ignoredY) && ignoredY[iy]: e = diff.UniqueY default: e, edits = edits[0], edits[1:] } switch e { case diff.UniqueX: s.compareAny(withIndexes(ix, -1)) ix++ case diff.UniqueY: s.compareAny(withIndexes(-1, iy)) iy++ default: s.compareAny(withIndexes(ix, iy)) ix++ iy++ } } } func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) { if vx.IsNil() || vy.IsNil() { s.report(vx.IsNil() && vy.IsNil(), 0) return } // Cycle-detection for maps. if eq, visited := s.curPtrs.Push(vx, vy); visited { s.report(eq, reportByCycle) return } defer s.curPtrs.Pop(vx, vy) // We combine and sort the two map keys so that we can perform the // comparisons in a deterministic order. step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}} for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) { step.vx = vx.MapIndex(k) step.vy = vy.MapIndex(k) step.key = k if !step.vx.IsValid() && !step.vy.IsValid() { // It is possible for both vx and vy to be invalid if the // key contained a NaN value in it. // // Even with the ability to retrieve NaN keys in Go 1.12, // there still isn't a sensible way to compare the values since // a NaN key may map to multiple unordered values. // The most reasonable way to compare NaNs would be to compare the // set of values. However, this is impossible to do efficiently // since set equality is provably an O(n^2) operation given only // an Equal function. If we had a Less function or Hash function, // this could be done in O(n*log(n)) or O(n), respectively. // // Rather than adding complex logic to deal with NaNs, make it // the user's responsibility to compare such obscure maps. const help = "consider providing a Comparer to compare the map" panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help)) } s.compareAny(step) } } func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) { if vx.IsNil() || vy.IsNil() { s.report(vx.IsNil() && vy.IsNil(), 0) return } // Cycle-detection for pointers. 
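	// Editor's note (illustrative): this check is what lets Equal terminate
	// on self-referential values such as
	//
	//	type node struct{ Next *node }
	//	n := &node{}
	//	n.Next = n
	//
	// A pointer pair that was already pushed onto curPtrs is reported via
	// reportByCycle instead of being recursed into again.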
if eq, visited := s.curPtrs.Push(vx, vy); visited { s.report(eq, reportByCycle) return } defer s.curPtrs.Pop(vx, vy) vx, vy = vx.Elem(), vy.Elem() s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}}) } func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) { if vx.IsNil() || vy.IsNil() { s.report(vx.IsNil() && vy.IsNil(), 0) return } vx, vy = vx.Elem(), vy.Elem() if vx.Type() != vy.Type() { s.report(false, 0) return } s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}}) } func (s *state) report(eq bool, rf resultFlags) { if rf&reportByIgnore == 0 { if eq { s.result.NumSame++ rf |= reportEqual } else { s.result.NumDiff++ rf |= reportUnequal } } for _, r := range s.reporters { r.Report(Result{flags: rf}) } } // recChecker tracks the state needed to periodically perform checks that // user provided transformers are not stuck in an infinitely recursive cycle. type recChecker struct{ next int } // Check scans the Path for any recursive transformers and panics when any // recursive transformers are detected. Note that the presence of a // recursive Transformer does not necessarily imply an infinite cycle. // As such, this check only activates after some minimal number of path steps. func (rc *recChecker) Check(p Path) { const minLen = 1 << 16 if rc.next == 0 { rc.next = minLen } if len(p) < rc.next { return } rc.next <<= 1 // Check whether the same transformer has appeared at least twice. var ss []string m := map[Option]int{} for _, ps := range p { if t, ok := ps.(Transform); ok { t := t.Option() if m[t] == 1 { // Transformer was used exactly once before tf := t.(*transformer).fnc.Type() ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0))) } m[t]++ } } if len(ss) > 0 { const warning = "recursive set of Transformers detected" const help = "consider using cmpopts.AcyclicTransformer" set := strings.Join(ss, "\n\t") panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help)) } } // dynChecker tracks the state needed to periodically perform checks that // user provided functions are symmetric and deterministic. // The zero value is safe for immediate use. type dynChecker struct{ curr, next int } // Next increments the state and reports whether a check should be performed. // // Checks occur every Nth function call, where N is a triangular number: // // 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ... // // See https://en.wikipedia.org/wiki/Triangular_number // // This sequence ensures that the cost of checks drops significantly as // the number of functions calls grows larger. func (dc *dynChecker) Next() bool { ok := dc.curr == dc.next if ok { dc.curr = 0 dc.next++ } dc.curr++ return ok } // makeAddressable returns a value that is always addressable. // It returns the input verbatim if it is already addressable, // otherwise it creates a new value and returns an addressable copy. func makeAddressable(v reflect.Value) reflect.Value { if v.CanAddr() { return v } vc := reflect.New(v.Type()).Elem() vc.Set(v) return vc } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/compare_test.go0000644000000000000000000033554115024302467024721 0ustar rootroot// Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
package cmp_test import ( "bytes" "crypto/sha256" "encoding/json" "errors" "flag" "fmt" "io" "io/ioutil" "math" "math/rand" "reflect" "regexp" "sort" "strconv" "strings" "sync" "testing" "time" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/google/go-cmp/cmp/internal/flags" pb "github.com/google/go-cmp/cmp/internal/testprotos" ts "github.com/google/go-cmp/cmp/internal/teststructs" foo1 "github.com/google/go-cmp/cmp/internal/teststructs/foo1" foo2 "github.com/google/go-cmp/cmp/internal/teststructs/foo2" ) func init() { flags.Deterministic = true } var update = flag.Bool("update", false, "update golden test files") const goldenHeaderPrefix = "<<< " const goldenFooterPrefix = ">>> " // mustParseGolden parses a file as a set of key-value pairs. // // The syntax is simple and looks something like: // // <<< Key1 // value1a // value1b // >>> Key1 // <<< Key2 // value2 // >>> Key2 // // It is the user's responsibility to choose a sufficiently unique key name // such that it never appears in the body of the value itself. func mustParseGolden(path string) map[string]string { b, err := ioutil.ReadFile(path) if err != nil { panic(err) } s := string(b) out := map[string]string{} for len(s) > 0 { // Identify the next header. i := strings.Index(s, "\n") + len("\n") header := s[:i] if !strings.HasPrefix(header, goldenHeaderPrefix) { panic(fmt.Sprintf("invalid header: %q", header)) } // Locate the next footer. footer := goldenFooterPrefix + header[len(goldenHeaderPrefix):] j := strings.Index(s, footer) if j < 0 { panic(fmt.Sprintf("missing footer: %q", footer)) } // Store the name and data. name := header[len(goldenHeaderPrefix) : len(header)-len("\n")] if _, ok := out[name]; ok { panic(fmt.Sprintf("duplicate name: %q", name)) } out[name] = s[len(header):j] s = s[j+len(footer):] } return out } func mustFormatGolden(path string, in []struct{ Name, Data string }) { var b []byte for _, v := range in { b = append(b, goldenHeaderPrefix+v.Name+"\n"...) b = append(b, v.Data...) b = append(b, goldenFooterPrefix+v.Name+"\n"...) } if err := ioutil.WriteFile(path, b, 0664); err != nil { panic(err) } } var now = time.Date(2009, time.November, 10, 23, 00, 00, 00, time.UTC) // TODO(≥go1.18): Define a generic function that boxes a value on the heap. func newInt(n int) *int { return &n } type Stringer string func newStringer(s string) fmt.Stringer { return (*Stringer)(&s) } func (s Stringer) String() string { return string(s) } type test struct { label string // Test name x, y interface{} // Input values to compare opts []cmp.Option // Input options wantEqual bool // Whether any difference is expected wantPanic string // Sub-string of an expected panic message reason string // The reason for the expected outcome } func TestDiff(t *testing.T) { var tests []test tests = append(tests, comparerTests()...) tests = append(tests, transformerTests()...) tests = append(tests, reporterTests()...) tests = append(tests, embeddedTests()...) tests = append(tests, methodTests()...) tests = append(tests, cycleTests()...) tests = append(tests, project1Tests()...) tests = append(tests, project2Tests()...) tests = append(tests, project3Tests()...) tests = append(tests, project4Tests()...) 
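	// Editor's note: the expected diffs live in the golden file declared just
	// below (testdata/diffs); running this test with the -update flag
	// regenerates that file from the diffs produced by this run instead of
	// comparing against it.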
const goldenFile = "testdata/diffs" gotDiffs := []struct{ Name, Data string }{} wantDiffs := mustParseGolden(goldenFile) for _, tt := range tests { tt := tt t.Run(tt.label, func(t *testing.T) { if !*update { t.Parallel() } var gotDiff, gotPanic string func() { defer func() { if ex := recover(); ex != nil { if s, ok := ex.(string); ok { gotPanic = s } else { panic(ex) } } }() gotDiff = cmp.Diff(tt.x, tt.y, tt.opts...) }() switch { case strings.Contains(t.Name(), "#"): panic("unique test name must be provided") case tt.reason == "": panic("reason must be provided") case tt.wantPanic == "": if gotPanic != "" { t.Fatalf("unexpected panic message: %s\nreason: %v", gotPanic, tt.reason) } if *update { if gotDiff != "" { gotDiffs = append(gotDiffs, struct{ Name, Data string }{t.Name(), gotDiff}) } } else { wantDiff := wantDiffs[t.Name()] if diff := cmp.Diff(wantDiff, gotDiff); diff != "" { t.Fatalf("Diff:\ngot:\n%s\nwant:\n%s\ndiff: (-want +got)\n%s\nreason: %v", gotDiff, wantDiff, diff, tt.reason) } } gotEqual := gotDiff == "" if gotEqual != tt.wantEqual { t.Fatalf("Equal = %v, want %v\nreason: %v", gotEqual, tt.wantEqual, tt.reason) } default: if !strings.Contains(gotPanic, tt.wantPanic) { t.Fatalf("panic message:\ngot: %s\nwant: %s\nreason: %v", gotPanic, tt.wantPanic, tt.reason) } } }) } if *update { mustFormatGolden(goldenFile, gotDiffs) } } func comparerTests() []test { const label = "Comparer" type Iface1 interface { Method() } type Iface2 interface { Method() } type tarHeader struct { Name string Mode int64 Uid int Gid int Size int64 ModTime time.Time Typeflag byte Linkname string Uname string Gname string Devmajor int64 Devminor int64 AccessTime time.Time ChangeTime time.Time Xattrs map[string]string } type namedWithUnexported struct { unexported string } makeTarHeaders := func(tf byte) (hs []tarHeader) { for i := 0; i < 5; i++ { hs = append(hs, tarHeader{ Name: fmt.Sprintf("some/dummy/test/file%d", i), Mode: 0664, Uid: i * 1000, Gid: i * 1000, Size: 1 << uint(i), ModTime: now.Add(time.Duration(i) * time.Hour), Uname: "user", Gname: "group", Typeflag: tf, }) } return hs } return []test{{ label: label + "/Nil", x: nil, y: nil, wantEqual: true, reason: "nils are equal", }, { label: label + "/Integer", x: 1, y: 1, wantEqual: true, reason: "identical integers are equal", }, { label: label + "/UnfilteredIgnore", x: 1, y: 1, opts: []cmp.Option{cmp.Ignore()}, wantPanic: "cannot use an unfiltered option", reason: "unfiltered options are functionally useless", }, { label: label + "/UnfilteredCompare", x: 1, y: 1, opts: []cmp.Option{cmp.Comparer(func(_, _ interface{}) bool { return true })}, wantPanic: "cannot use an unfiltered option", reason: "unfiltered options are functionally useless", }, { label: label + "/UnfilteredTransform", x: 1, y: 1, opts: []cmp.Option{cmp.Transformer("λ", func(x interface{}) interface{} { return x })}, wantPanic: "cannot use an unfiltered option", reason: "unfiltered options are functionally useless", }, { label: label + "/AmbiguousOptions", x: 1, y: 1, opts: []cmp.Option{ cmp.Comparer(func(x, y int) bool { return true }), cmp.Transformer("λ", func(x int) float64 { return float64(x) }), }, wantPanic: "ambiguous set of applicable options", reason: "both options apply on int, leading to ambiguity", }, { label: label + "/IgnorePrecedence", x: 1, y: 1, opts: []cmp.Option{ cmp.FilterPath(func(p cmp.Path) bool { return len(p) > 0 && p[len(p)-1].Type().Kind() == reflect.Int }, cmp.Options{cmp.Ignore(), cmp.Ignore(), cmp.Ignore()}), cmp.Comparer(func(x, y int) bool { return 
true }), cmp.Transformer("λ", func(x int) float64 { return float64(x) }), }, wantEqual: true, reason: "ignore takes precedence over other options", }, { label: label + "/UnknownOption", opts: []cmp.Option{struct{ cmp.Option }{}}, wantPanic: "unknown option", reason: "use of unknown option should panic", }, { label: label + "/StructEqual", x: struct{ A, B, C int }{1, 2, 3}, y: struct{ A, B, C int }{1, 2, 3}, wantEqual: true, reason: "struct comparison with all equal fields", }, { label: label + "/StructInequal", x: struct{ A, B, C int }{1, 2, 3}, y: struct{ A, B, C int }{1, 2, 4}, wantEqual: false, reason: "struct comparison with inequal C field", }, { label: label + "/StructUnexported", x: struct{ a, b, c int }{1, 2, 3}, y: struct{ a, b, c int }{1, 2, 4}, wantPanic: "cannot handle unexported field", reason: "unexported fields result in a panic by default", }, { label: label + "/PointerStructEqual", x: &struct{ A *int }{newInt(4)}, y: &struct{ A *int }{newInt(4)}, wantEqual: true, reason: "comparison of pointer to struct with equal A field", }, { label: label + "/PointerStructInequal", x: &struct{ A *int }{newInt(4)}, y: &struct{ A *int }{newInt(5)}, wantEqual: false, reason: "comparison of pointer to struct with inequal A field", }, { label: label + "/PointerStructTrueComparer", x: &struct{ A *int }{newInt(4)}, y: &struct{ A *int }{newInt(5)}, opts: []cmp.Option{ cmp.Comparer(func(x, y int) bool { return true }), }, wantEqual: true, reason: "comparison of pointer to struct with inequal A field, but treated as equal with always equal comparer", }, { label: label + "/PointerStructNonNilComparer", x: &struct{ A *int }{newInt(4)}, y: &struct{ A *int }{newInt(5)}, opts: []cmp.Option{ cmp.Comparer(func(x, y *int) bool { return x != nil && y != nil }), }, wantEqual: true, reason: "comparison of pointer to struct with inequal A field, but treated as equal with comparer checking pointers for nilness", }, { label: label + "/StructNestedPointerEqual", x: &struct{ R *bytes.Buffer }{}, y: &struct{ R *bytes.Buffer }{}, wantEqual: true, reason: "equal since both pointers in R field are nil", }, { label: label + "/StructNestedPointerInequal", x: &struct{ R *bytes.Buffer }{new(bytes.Buffer)}, y: &struct{ R *bytes.Buffer }{}, wantEqual: false, reason: "inequal since R field is inequal", }, { label: label + "/StructNestedPointerTrueComparer", x: &struct{ R *bytes.Buffer }{new(bytes.Buffer)}, y: &struct{ R *bytes.Buffer }{}, opts: []cmp.Option{ cmp.Comparer(func(x, y io.Reader) bool { return true }), }, wantEqual: true, reason: "equal despite inequal R field values since the comparer always reports true", }, { label: label + "/StructNestedValueUnexportedPanic1", x: &struct{ R bytes.Buffer }{}, y: &struct{ R bytes.Buffer }{}, wantPanic: "cannot handle unexported field", reason: "bytes.Buffer contains unexported fields", }, { label: label + "/StructNestedValueUnexportedPanic2", x: &struct{ R bytes.Buffer }{}, y: &struct{ R bytes.Buffer }{}, opts: []cmp.Option{ cmp.Comparer(func(x, y io.Reader) bool { return true }), }, wantPanic: "cannot handle unexported field", reason: "bytes.Buffer value does not implement io.Reader", }, { label: label + "/StructNestedValueEqual", x: &struct{ R bytes.Buffer }{}, y: &struct{ R bytes.Buffer }{}, opts: []cmp.Option{ cmp.Transformer("Ref", func(x bytes.Buffer) *bytes.Buffer { return &x }), cmp.Comparer(func(x, y io.Reader) bool { return true }), }, wantEqual: true, reason: "bytes.Buffer pointer due to shallow copy does implement io.Reader", }, { label: label + 
"/RegexpUnexportedPanic", x: []*regexp.Regexp{nil, regexp.MustCompile("a*b*c*")}, y: []*regexp.Regexp{nil, regexp.MustCompile("a*b*c*")}, wantPanic: "cannot handle unexported field", reason: "regexp.Regexp contains unexported fields", }, { label: label + "/RegexpEqual", x: []*regexp.Regexp{nil, regexp.MustCompile("a*b*c*")}, y: []*regexp.Regexp{nil, regexp.MustCompile("a*b*c*")}, opts: []cmp.Option{cmp.Comparer(func(x, y *regexp.Regexp) bool { if x == nil || y == nil { return x == nil && y == nil } return x.String() == y.String() })}, wantEqual: true, reason: "comparer for *regexp.Regexp applied with equal regexp strings", }, { label: label + "/RegexpInequal", x: []*regexp.Regexp{nil, regexp.MustCompile("a*b*c*")}, y: []*regexp.Regexp{nil, regexp.MustCompile("a*b*d*")}, opts: []cmp.Option{cmp.Comparer(func(x, y *regexp.Regexp) bool { if x == nil || y == nil { return x == nil && y == nil } return x.String() == y.String() })}, wantEqual: false, reason: "comparer for *regexp.Regexp applied with inequal regexp strings", }, { label: label + "/TriplePointerEqual", x: func() ***int { a := 0 b := &a c := &b return &c }(), y: func() ***int { a := 0 b := &a c := &b return &c }(), wantEqual: true, reason: "three layers of pointers to the same value", }, { label: label + "/TriplePointerInequal", x: func() ***int { a := 0 b := &a c := &b return &c }(), y: func() ***int { a := 1 b := &a c := &b return &c }(), wantEqual: false, reason: "three layers of pointers to different values", }, { label: label + "/SliceWithDifferingCapacity", x: []int{1, 2, 3, 4, 5}[:3], y: []int{1, 2, 3}, wantEqual: true, reason: "elements past the slice length are not compared", }, { label: label + "/StringerEqual", x: struct{ fmt.Stringer }{bytes.NewBufferString("hello")}, y: struct{ fmt.Stringer }{regexp.MustCompile("hello")}, opts: []cmp.Option{cmp.Comparer(func(x, y fmt.Stringer) bool { return x.String() == y.String() })}, wantEqual: true, reason: "comparer for fmt.Stringer used to compare differing types with same string", }, { label: label + "/StringerInequal", x: struct{ fmt.Stringer }{bytes.NewBufferString("hello")}, y: struct{ fmt.Stringer }{regexp.MustCompile("hello2")}, opts: []cmp.Option{cmp.Comparer(func(x, y fmt.Stringer) bool { return x.String() == y.String() })}, wantEqual: false, reason: "comparer for fmt.Stringer used to compare differing types with different strings", }, { label: label + "/DifferingHash", x: sha256.Sum256([]byte{'a'}), y: sha256.Sum256([]byte{'b'}), wantEqual: false, reason: "hash differs", }, { label: label + "/NilStringer", x: new(fmt.Stringer), y: nil, wantEqual: false, reason: "by default differing types are always inequal", }, { label: label + "/TarHeaders", x: makeTarHeaders('0'), y: makeTarHeaders('\x00'), wantEqual: false, reason: "type flag differs between the headers", }, { label: label + "/NonDeterministicComparer", x: make([]int, 1000), y: make([]int, 1000), opts: []cmp.Option{ cmp.Comparer(func(_, _ int) bool { return rand.Intn(2) == 0 }), }, wantPanic: "non-deterministic or non-symmetric function detected", reason: "non-deterministic comparer", }, { label: label + "/NonDeterministicFilter", x: make([]int, 1000), y: make([]int, 1000), opts: []cmp.Option{ cmp.FilterValues(func(_, _ int) bool { return rand.Intn(2) == 0 }, cmp.Ignore()), }, wantPanic: "non-deterministic or non-symmetric function detected", reason: "non-deterministic filter", }, { label: label + "/AsymmetricComparer", x: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, y: []int{10, 9, 8, 7, 6, 5, 4, 3, 2, 1}, opts: []cmp.Option{ 
cmp.Comparer(func(x, y int) bool { return x < y }), }, wantPanic: "non-deterministic or non-symmetric function detected", reason: "asymmetric comparer", }, { label: label + "/NonDeterministicTransformer", x: make([]string, 1000), y: make([]string, 1000), opts: []cmp.Option{ cmp.Transformer("λ", func(x string) int { return rand.Int() }), }, wantPanic: "non-deterministic function detected", reason: "non-deterministic transformer", }, { label: label + "/IrreflexiveComparison", x: make([]int, 10), y: make([]int, 10), opts: []cmp.Option{ cmp.Transformer("λ", func(x int) float64 { return math.NaN() }), }, wantEqual: false, reason: "dynamic checks should not panic for non-reflexive comparisons", }, { label: label + "/StringerMapKey", x: map[*pb.Stringer]*pb.Stringer{{"hello"}: {"world"}}, y: map[*pb.Stringer]*pb.Stringer(nil), wantEqual: false, reason: "stringer should be used to format the map key", }, { label: label + "/StringerBacktick", x: []*pb.Stringer{{`multi\nline\nline\nline`}}, wantEqual: false, reason: "stringer should use backtick quoting if more readable", }, { label: label + "/AvoidPanicAssignableConverter", x: struct{ I Iface2 }{}, y: struct{ I Iface2 }{}, opts: []cmp.Option{ cmp.Comparer(func(x, y Iface1) bool { return x == nil && y == nil }), }, wantEqual: true, reason: "function call using Go reflection should automatically convert assignable interfaces; see https://golang.org/issues/22143", }, { label: label + "/AvoidPanicAssignableTransformer", x: struct{ I Iface2 }{}, y: struct{ I Iface2 }{}, opts: []cmp.Option{ cmp.Transformer("λ", func(v Iface1) bool { return v == nil }), }, wantEqual: true, reason: "function call using Go reflection should automatically convert assignable interfaces; see https://golang.org/issues/22143", }, { label: label + "/AvoidPanicAssignableFilter", x: struct{ I Iface2 }{}, y: struct{ I Iface2 }{}, opts: []cmp.Option{ cmp.FilterValues(func(x, y Iface1) bool { return x == nil && y == nil }, cmp.Ignore()), }, wantEqual: true, reason: "function call using Go reflection should automatically convert assignable interfaces; see https://golang.org/issues/22143", }, { label: label + "/DynamicMap", x: []interface{}{map[string]interface{}{"avg": 0.278, "hr": 65, "name": "Mark McGwire"}, map[string]interface{}{"avg": 0.288, "hr": 63, "name": "Sammy Sosa"}}, y: []interface{}{map[string]interface{}{"avg": 0.278, "hr": 65.0, "name": "Mark McGwire"}, map[string]interface{}{"avg": 0.288, "hr": 63.0, "name": "Sammy Sosa"}}, wantEqual: false, reason: "dynamic map with differing types (but semantically equivalent values) should be inequal", }, { label: label + "/MapKeyPointer", x: map[*int]string{ new(int): "hello", }, y: map[*int]string{ new(int): "world", }, wantEqual: false, reason: "map keys should use shallow (rather than deep) pointer comparison", }, { label: label + "/IgnoreSliceElements", x: [2][]int{ {0, 0, 0, 1, 2, 3, 0, 0, 4, 5, 6, 7, 8, 0, 9, 0, 0}, {0, 1, 0, 0, 0, 20}, }, y: [2][]int{ {1, 2, 3, 0, 4, 5, 6, 7, 0, 8, 9, 0, 0, 0}, {0, 0, 1, 2, 0, 0, 0}, }, opts: []cmp.Option{ cmp.FilterPath(func(p cmp.Path) bool { vx, vy := p.Last().Values() if vx.IsValid() && vx.Kind() == reflect.Int && vx.Int() == 0 { return true } if vy.IsValid() && vy.Kind() == reflect.Int && vy.Int() == 0 { return true } return false }, cmp.Ignore()), }, wantEqual: false, reason: "all zero slice elements are ignored (even if missing)", }, { label: label + "/IgnoreMapEntries", x: [2]map[string]int{ {"ignore1": 0, "ignore2": 0, "keep1": 1, "keep2": 2, "KEEP3": 3, "IGNORE3": 0}, {"keep1": 1, 
"ignore1": 0}, }, y: [2]map[string]int{ {"ignore1": 0, "ignore3": 0, "ignore4": 0, "keep1": 1, "keep2": 2, "KEEP3": 3}, {"keep1": 1, "keep2": 2, "ignore2": 0}, }, opts: []cmp.Option{ cmp.FilterPath(func(p cmp.Path) bool { vx, vy := p.Last().Values() if vx.IsValid() && vx.Kind() == reflect.Int && vx.Int() == 0 { return true } if vy.IsValid() && vy.Kind() == reflect.Int && vy.Int() == 0 { return true } return false }, cmp.Ignore()), }, wantEqual: false, reason: "all zero map entries are ignored (even if missing)", }, { label: label + "/PanicUnexportedNamed", x: namedWithUnexported{unexported: "x"}, y: namedWithUnexported{unexported: "y"}, wantPanic: strconv.Quote(reflect.TypeOf(namedWithUnexported{}).PkgPath()) + ".namedWithUnexported", reason: "panic on named struct type with unexported field", }, { label: label + "/PanicUnexportedUnnamed", x: struct{ a int }{}, y: struct{ a int }{}, wantPanic: strconv.Quote(reflect.TypeOf(namedWithUnexported{}).PkgPath()) + ".(struct { a int })", reason: "panic on unnamed struct type with unexported field", }, { label: label + "/UnaddressableStruct", x: struct{ s fmt.Stringer }{new(bytes.Buffer)}, y: struct{ s fmt.Stringer }{nil}, opts: []cmp.Option{ cmp.AllowUnexported(struct{ s fmt.Stringer }{}), cmp.FilterPath(func(p cmp.Path) bool { if _, ok := p.Last().(cmp.StructField); !ok { return false } t := p.Index(-1).Type() vx, vy := p.Index(-1).Values() pvx, pvy := p.Index(-2).Values() switch { case vx.Type() != t: panic(fmt.Sprintf("inconsistent type: %v != %v", vx.Type(), t)) case vy.Type() != t: panic(fmt.Sprintf("inconsistent type: %v != %v", vy.Type(), t)) case vx.CanAddr() != pvx.CanAddr(): panic(fmt.Sprintf("inconsistent addressability: %v != %v", vx.CanAddr(), pvx.CanAddr())) case vy.CanAddr() != pvy.CanAddr(): panic(fmt.Sprintf("inconsistent addressability: %v != %v", vy.CanAddr(), pvy.CanAddr())) } return true }, cmp.Ignore()), }, wantEqual: true, reason: "verify that exporter does not leak implementation details", }, { label: label + "/ErrorPanic", x: io.EOF, y: io.EOF, wantPanic: "consider using cmpopts.EquateErrors", reason: "suggest cmpopts.EquateErrors when accessing unexported fields of error types", }, { label: label + "/ErrorEqual", x: io.EOF, y: io.EOF, opts: []cmp.Option{cmpopts.EquateErrors()}, wantEqual: true, reason: "cmpopts.EquateErrors should equate these two errors as sentinel values", }} } func transformerTests() []test { type StringBytes struct { String string Bytes []byte } const label = "Transformer" transformOnce := func(name string, f interface{}) cmp.Option { xform := cmp.Transformer(name, f) return cmp.FilterPath(func(p cmp.Path) bool { for _, ps := range p { if tr, ok := ps.(cmp.Transform); ok && tr.Option() == xform { return false } } return true }, xform) } return []test{{ label: label + "/Uints", x: uint8(0), y: uint8(1), opts: []cmp.Option{ cmp.Transformer("λ", func(in uint8) uint16 { return uint16(in) }), cmp.Transformer("λ", func(in uint16) uint32 { return uint32(in) }), cmp.Transformer("λ", func(in uint32) uint64 { return uint64(in) }), }, wantEqual: false, reason: "transform uint8 -> uint16 -> uint32 -> uint64", }, { label: label + "/Ambiguous", x: 0, y: 1, opts: []cmp.Option{ cmp.Transformer("λ", func(in int) int { return in / 2 }), cmp.Transformer("λ", func(in int) int { return in }), }, wantPanic: "ambiguous set of applicable options", reason: "both transformers apply on int", }, { label: label + "/Filtered", x: []int{0, -5, 0, -1}, y: []int{1, 3, 0, -5}, opts: []cmp.Option{ cmp.FilterValues( func(x, y int) bool 
{ return x+y >= 0 }, cmp.Transformer("λ", func(in int) int64 { return int64(in / 2) }), ), cmp.FilterValues( func(x, y int) bool { return x+y < 0 }, cmp.Transformer("λ", func(in int) int64 { return int64(in) }), ), }, wantEqual: false, reason: "disjoint transformers filtered based on the values", }, { label: label + "/DisjointOutput", x: 0, y: 1, opts: []cmp.Option{ cmp.Transformer("λ", func(in int) interface{} { if in == 0 { return "zero" } return float64(in) }), }, wantEqual: false, reason: "output type differs based on input value", }, { label: label + "/JSON", x: `{ "firstName": "John", "lastName": "Smith", "age": 25, "isAlive": true, "address": { "city": "Los Angeles", "postalCode": "10021-3100", "state": "CA", "streetAddress": "21 2nd Street" }, "phoneNumbers": [{ "type": "home", "number": "212 555-4321" },{ "type": "office", "number": "646 555-4567" },{ "number": "123 456-7890", "type": "mobile" }], "children": [] }`, y: `{"firstName":"John","lastName":"Smith","isAlive":true,"age":25, "address":{"streetAddress":"21 2nd Street","city":"New York", "state":"NY","postalCode":"10021-3100"},"phoneNumbers":[{"type":"home", "number":"212 555-1234"},{"type":"office","number":"646 555-4567"},{ "type":"mobile","number":"123 456-7890"}],"children":[],"spouse":null}`, opts: []cmp.Option{ transformOnce("ParseJSON", func(s string) (m map[string]interface{}) { if err := json.Unmarshal([]byte(s), &m); err != nil { panic(err) } return m }), }, wantEqual: false, reason: "transformer used to parse JSON input", }, { label: label + "/AcyclicString", x: StringBytes{String: "some\nmulti\nLine\nstring", Bytes: []byte("some\nmulti\nline\nbytes")}, y: StringBytes{String: "some\nmulti\nline\nstring", Bytes: []byte("some\nmulti\nline\nBytes")}, opts: []cmp.Option{ transformOnce("SplitString", func(s string) []string { return strings.Split(s, "\n") }), transformOnce("SplitBytes", func(b []byte) [][]byte { return bytes.Split(b, []byte("\n")) }), }, wantEqual: false, reason: "string -> []string and []byte -> [][]byte transformer only applied once", }, { label: label + "/CyclicString", x: "a\nb\nc\n", y: "a\nb\nc\n", opts: []cmp.Option{ cmp.Transformer("SplitLines", func(s string) []string { return strings.Split(s, "\n") }), }, wantPanic: "recursive set of Transformers detected", reason: "cyclic transformation from string -> []string -> string", }, { label: label + "/CyclicComplex", x: complex64(0), y: complex64(0), opts: []cmp.Option{ cmp.Transformer("T1", func(x complex64) complex128 { return complex128(x) }), cmp.Transformer("T2", func(x complex128) [2]float64 { return [2]float64{real(x), imag(x)} }), cmp.Transformer("T3", func(x float64) complex64 { return complex64(complex(x, 0)) }), }, wantPanic: "recursive set of Transformers detected", reason: "cyclic transformation from complex64 -> complex128 -> [2]float64 -> complex64", }} } func reporterTests() []test { const label = "Reporter" type ( MyString string MyByte byte MyBytes []byte MyInt int8 MyInts []int8 MyUint int16 MyUints []int16 MyFloat float32 MyFloats []float32 MyComposite struct { StringA string StringB MyString BytesA []byte BytesB []MyByte BytesC MyBytes IntsA []int8 IntsB []MyInt IntsC MyInts UintsA []uint16 UintsB []MyUint UintsC MyUints FloatsA []float32 FloatsB []MyFloat FloatsC MyFloats } PointerString *string ) return []test{{ label: label + "/PanicStringer", x: struct{ X fmt.Stringer }{struct{ fmt.Stringer }{nil}}, y: struct{ X fmt.Stringer }{bytes.NewBuffer(nil)}, wantEqual: false, reason: "panic from fmt.Stringer should not crash the 
reporter", }, { label: label + "/PanicError", x: struct{ X error }{struct{ error }{nil}}, y: struct{ X error }{errors.New("")}, wantEqual: false, reason: "panic from error should not crash the reporter", }, { label: label + "/AmbiguousType", x: foo1.Bar{}, y: foo2.Bar{}, wantEqual: false, reason: "reporter should display the qualified type name to disambiguate between the two values", }, { label: label + "/AmbiguousPointer", x: newInt(0), y: newInt(0), opts: []cmp.Option{ cmp.Comparer(func(x, y *int) bool { return x == y }), }, wantEqual: false, reason: "reporter should display the address to disambiguate between the two values", }, { label: label + "/AmbiguousPointerStruct", x: struct{ I *int }{newInt(0)}, y: struct{ I *int }{newInt(0)}, opts: []cmp.Option{ cmp.Comparer(func(x, y *int) bool { return x == y }), }, wantEqual: false, reason: "reporter should display the address to disambiguate between the two struct fields", }, { label: label + "/AmbiguousPointerSlice", x: []*int{newInt(0)}, y: []*int{newInt(0)}, opts: []cmp.Option{ cmp.Comparer(func(x, y *int) bool { return x == y }), }, wantEqual: false, reason: "reporter should display the address to disambiguate between the two slice elements", }, { label: label + "/AmbiguousPointerMap", x: map[string]*int{"zero": newInt(0)}, y: map[string]*int{"zero": newInt(0)}, opts: []cmp.Option{ cmp.Comparer(func(x, y *int) bool { return x == y }), }, wantEqual: false, reason: "reporter should display the address to disambiguate between the two map values", }, { label: label + "/AmbiguousStringer", x: Stringer("hello"), y: newStringer("hello"), wantEqual: false, reason: "reporter should avoid calling String to disambiguate between the two values", }, { label: label + "/AmbiguousStringerStruct", x: struct{ S fmt.Stringer }{Stringer("hello")}, y: struct{ S fmt.Stringer }{newStringer("hello")}, wantEqual: false, reason: "reporter should avoid calling String to disambiguate between the two struct fields", }, { label: label + "/AmbiguousStringerSlice", x: []fmt.Stringer{Stringer("hello")}, y: []fmt.Stringer{newStringer("hello")}, wantEqual: false, reason: "reporter should avoid calling String to disambiguate between the two slice elements", }, { label: label + "/AmbiguousStringerMap", x: map[string]fmt.Stringer{"zero": Stringer("hello")}, y: map[string]fmt.Stringer{"zero": newStringer("hello")}, wantEqual: false, reason: "reporter should avoid calling String to disambiguate between the two map values", }, { label: label + "/AmbiguousSliceHeader", x: make([]int, 0, 5), y: make([]int, 0, 1000), opts: []cmp.Option{ cmp.Comparer(func(x, y []int) bool { return cap(x) == cap(y) }), }, wantEqual: false, reason: "reporter should display the slice header to disambiguate between the two slice values", }, { label: label + "/AmbiguousStringerMapKey", x: map[interface{}]string{ nil: "nil", Stringer("hello"): "goodbye", foo1.Bar{"fizz"}: "buzz", }, y: map[interface{}]string{ newStringer("hello"): "goodbye", foo2.Bar{"fizz"}: "buzz", }, wantEqual: false, reason: "reporter should avoid calling String to disambiguate between the two map keys", }, { label: label + "/NonAmbiguousStringerMapKey", x: map[interface{}]string{Stringer("hello"): "goodbye"}, y: map[interface{}]string{newStringer("fizz"): "buzz"}, wantEqual: false, reason: "reporter should call String as there is no ambiguity between the two map keys", }, { label: label + "/InvalidUTF8", x: MyString("\xed\xa0\x80"), wantEqual: false, reason: "invalid UTF-8 should format as quoted string", }, { label: label + 
"/UnbatchedSlice", x: MyComposite{IntsA: []int8{11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29}}, y: MyComposite{IntsA: []int8{10, 11, 21, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29}}, wantEqual: false, reason: "unbatched diffing desired since few elements differ", }, { label: label + "/BatchedSlice", x: MyComposite{IntsA: []int8{10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29}}, y: MyComposite{IntsA: []int8{12, 29, 13, 27, 22, 23, 17, 18, 19, 20, 21, 10, 26, 16, 25, 28, 11, 15, 24, 14}}, wantEqual: false, reason: "batched diffing desired since many elements differ", }, { label: label + "/BatchedWithComparer", x: MyComposite{BytesA: []byte{10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29}}, y: MyComposite{BytesA: []byte{12, 29, 13, 27, 22, 23, 17, 18, 19, 20, 21, 10, 26, 16, 25, 28, 11, 15, 24, 14}}, wantEqual: false, opts: []cmp.Option{ cmp.Comparer(bytes.Equal), }, reason: "batched diffing desired since many elements differ", }, { label: label + "/BatchedLong", x: MyComposite{IntsA: []int8{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127}}, wantEqual: false, reason: "batched output desired for a single slice of primitives unique to one of the inputs", }, { label: label + "/BatchedNamedAndUnnamed", x: MyComposite{ BytesA: []byte{1, 2, 3}, BytesB: []MyByte{4, 5, 6}, BytesC: MyBytes{7, 8, 9}, IntsA: []int8{-1, -2, -3}, IntsB: []MyInt{-4, -5, -6}, IntsC: MyInts{-7, -8, -9}, UintsA: []uint16{1000, 2000, 3000}, UintsB: []MyUint{4000, 5000, 6000}, UintsC: MyUints{7000, 8000, 9000}, FloatsA: []float32{1.5, 2.5, 3.5}, FloatsB: []MyFloat{4.5, 5.5, 6.5}, FloatsC: MyFloats{7.5, 8.5, 9.5}, }, y: MyComposite{ BytesA: []byte{3, 2, 1}, BytesB: []MyByte{6, 5, 4}, BytesC: MyBytes{9, 8, 7}, IntsA: []int8{-3, -2, -1}, IntsB: []MyInt{-6, -5, -4}, IntsC: MyInts{-9, -8, -7}, UintsA: []uint16{3000, 2000, 1000}, UintsB: []MyUint{6000, 5000, 4000}, UintsC: MyUints{9000, 8000, 7000}, FloatsA: []float32{3.5, 2.5, 1.5}, FloatsB: []MyFloat{6.5, 5.5, 4.5}, FloatsC: MyFloats{9.5, 8.5, 7.5}, }, wantEqual: false, reason: "batched diffing available for both named and unnamed slices", }, { label: label + "/BinaryHexdump", x: MyComposite{BytesA: []byte("\xf3\x0f\x8a\xa4\xd3\x12R\t$\xbeX\x95A\xfd$fX\x8byT\xac\r\xd8qwp\x20j\\s\u007f\x8c\x17U\xc04\xcen\xf7\xaaG\xee2\x9d\xc5\xca\x1eX\xaf\x8f'\xf3\x02J\x90\xedi.p2\xb4\xab0 \xb6\xbd\\b4\x17\xb0\x00\xbbO~'G\x06\xf4.f\xfdc\xd7\x04ݷ0\xb7\xd1U~{\xf6\xb3~\x1dWi \x9e\xbc\xdf\xe1M\xa9\xef\xa2\xd2\xed\xb4Gx\xc9\xc9'\xa4\xc6\xce\xecDp]")}, y: MyComposite{BytesA: []byte("\xf3\x0f\x8a\xa4\xd3\x12R\t$\xbeT\xac\r\xd8qwp\x20j\\s\u007f\x8c\x17U\xc04\xcen\xf7\xaaG\xee2\x9d\xc5\xca\x1eX\xaf\x8f'\xf3\x02J\x90\xedi.p2\xb4\xab0 \xb6\xbd\\b4\x17\xb0\x00\xbbO~'G\x06\xf4.f\xfdc\xd7\x04ݷ0\xb7\xd1u-[]]\xf6\xb3haha~\x1dWI \x9e\xbc\xdf\xe1M\xa9\xef\xa2\xd2\xed\xb4Gx\xc9\xc9'\xa4\xc6\xce\xecDp]")}, wantEqual: false, reason: "binary diff in hexdump form since data is binary data", }, { label: label + "/StringHexdump", x: 
MyComposite{StringB: MyString("readme.txt\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x000000600\x000000000\x000000000\x0000000000046\x0000000000000\x00011173\x00 0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00ustar\x0000\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x000000000\x000000000\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")}, y: MyComposite{StringB: MyString("gopher.txt\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x000000600\x000000000\x000000000\x0000000000043\x0000000000000\x00011217\x00 
0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00ustar\x0000\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x000000000\x000000000\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")}, wantEqual: false, reason: "binary diff desired since string looks like binary data", }, { label: label + "/BinaryString", x: MyComposite{BytesA: []byte(`{"firstName":"John","lastName":"Smith","isAlive":true,"age":27,"address":{"streetAddress":"314 54th Avenue","city":"New York","state":"NY","postalCode":"10021-3100"},"phoneNumbers":[{"type":"home","number":"212 555-1234"},{"type":"office","number":"646 555-4567"},{"type":"mobile","number":"123 456-7890"}],"children":[],"spouse":null}`)}, y: MyComposite{BytesA: []byte(`{"firstName":"John","lastName":"Smith","isAlive":true,"age":27,"address":{"streetAddress":"21 2nd Street","city":"New York","state":"NY","postalCode":"10021-3100"},"phoneNumbers":[{"type":"home","number":"212 555-1234"},{"type":"office","number":"646 555-4567"},{"type":"mobile","number":"123 456-7890"}],"children":[],"spouse":null}`)}, wantEqual: false, reason: "batched textual diff desired since bytes looks like textual data", }, { label: label + "/TripleQuote", x: MyComposite{StringA: "aaa\nbbb\nccc\nddd\neee\nfff\nggg\nhhh\niii\njjj\nkkk\nlll\nmmm\nnnn\nooo\nppp\nqqq\nRRR\nsss\nttt\nuuu\nvvv\nwww\nxxx\nyyy\nzzz\n"}, y: MyComposite{StringA: "aaa\nbbb\nCCC\nddd\neee\nfff\nggg\nhhh\niii\njjj\nkkk\nlll\nmmm\nnnn\nooo\nppp\nqqq\nrrr\nSSS\nttt\nuuu\nvvv\nwww\nxxx\nyyy\nzzz\n"}, wantEqual: false, reason: "use triple-quote syntax", }, { label: label + "/TripleQuoteSlice", x: []string{ "aaa\nbbb\nccc\nddd\neee\nfff\nggg\nhhh\niii\njjj\nkkk\nlll\nmmm\nnnn\nooo\nppp\nqqq\nRRR\nsss\nttt\nuuu\nvvv\nwww\nxxx\nyyy\nzzz\n", "aaa\nbbb\nccc\nddd\neee\nfff\nggg\nhhh\niii\njjj\nkkk\nlll\nmmm\nnnn\nooo\nppp\nqqq\nRRR\nsss\nttt\nuuu\nvvv\nwww\nxxx\nyyy\nzzz\n", }, y: []string{ "aaa\nbbb\nccc\nddd\neee\nfff\nggg\nhhh\niii\njjj\nkkk\nlll\nmmm\nnnn\nooo\nppp\nqqq\nRRR\nsss\nttt\nuuu\nvvv\nwww\nxxx\nyyy\n", "aaa\nbbb\nccc\nddd\neee\nfff\nggg\nhhh\niii\njjj\nkkk\nlll\nmmm\nnnn\nooo\nppp\nqqq\nRRR\nsss\nttt\nuuu\nvvv\nwww\nxxx\nyyy\nzzz\n", }, wantEqual: false, reason: "use triple-quote syntax for slices of strings", }, { label: label + "/TripleQuoteNamedTypes", x: MyComposite{ StringB: 
MyString("aaa\nbbb\nccc\nddd\neee\nfff\nggg\nhhh\niii\njjj\nkkk\nlll\nmmm\nnnn\nooo\nppp\nqqq\nRRR\nsss\nttt\nuuu\nvvv\nwww\nxxx\nyyy\nzzz"), BytesC: MyBytes("aaa\nbbb\nccc\nddd\neee\nfff\nggg\nhhh\niii\njjj\nkkk\nlll\nmmm\nnnn\nooo\nppp\nqqq\nRRR\nsss\nttt\nuuu\nvvv\nwww\nxxx\nyyy\nzzz"), }, y: MyComposite{ StringB: MyString("aaa\nbbb\nCCC\nddd\neee\nfff\nggg\nhhh\niii\njjj\nkkk\nlll\nmmm\nnnn\nooo\nppp\nqqq\nrrr\nSSS\nttt\nuuu\nvvv\nwww\nxxx\nyyy\nzzz"), BytesC: MyBytes("aaa\nbbb\nCCC\nddd\neee\nfff\nggg\nhhh\niii\njjj\nkkk\nlll\nmmm\nnnn\nooo\nppp\nqqq\nrrr\nSSS\nttt\nuuu\nvvv\nwww\nxxx\nyyy\nzzz"), }, wantEqual: false, reason: "use triple-quote syntax for named types", }, { label: label + "/TripleQuoteSliceNamedTypes", x: []MyString{ "aaa\nbbb\nccc\nddd\neee\nfff\nggg\nhhh\niii\njjj\nkkk\nlll\nmmm\nnnn\nooo\nppp\nqqq\nRRR\nsss\nttt\nuuu\nvvv\nwww\nxxx\nyyy\nzzz\n", "aaa\nbbb\nccc\nddd\neee\nfff\nggg\nhhh\niii\njjj\nkkk\nlll\nmmm\nnnn\nooo\nppp\nqqq\nRRR\nsss\nttt\nuuu\nvvv\nwww\nxxx\nyyy\nzzz\n", }, y: []MyString{ "aaa\nbbb\nccc\nddd\neee\nfff\nggg\nhhh\niii\njjj\nkkk\nlll\nmmm\nnnn\nooo\nppp\nqqq\nRRR\nsss\nttt\nuuu\nvvv\nwww\nxxx\nyyy\n", "aaa\nbbb\nccc\nddd\neee\nfff\nggg\nhhh\niii\njjj\nkkk\nlll\nmmm\nnnn\nooo\nppp\nqqq\nRRR\nsss\nttt\nuuu\nvvv\nwww\nxxx\nyyy\nzzz\n", }, wantEqual: false, reason: "use triple-quote syntax for slices of named strings", }, { label: label + "/TripleQuoteEndlines", x: "aaa\nbbb\nccc\nddd\neee\nfff\nggg\r\nhhh\n\riii\njjj\nkkk\nlll\nmmm\nnnn\nooo\nppp\nqqq\nRRR\nsss\nttt\nuuu\nvvv\nwww\nxxx\nyyy\nzzz\n\r", y: "aaa\nbbb\nCCC\nddd\neee\nfff\nggg\r\nhhh\n\riii\njjj\nkkk\nlll\nmmm\nnnn\nooo\nppp\nqqq\nrrr\nsss\nttt\nuuu\nvvv\nwww\nxxx\nyyy\nzzz", wantEqual: false, reason: "use triple-quote syntax", }, { label: label + "/AvoidTripleQuoteAmbiguousQuotes", x: "aaa\nbbb\nccc\nddd\neee\nfff\nggg\nhhh\niii\njjj\nkkk\nlll\nmmm\nnnn\nooo\nppp\nqqq\nRRR\nsss\nttt\nuuu\nvvv\nwww\nxxx\nyyy\nzzz\n", y: "aaa\nbbb\nCCC\nddd\neee\n\"\"\"\nggg\nhhh\niii\njjj\nkkk\nlll\nmmm\nnnn\nooo\nppp\nqqq\nrrr\nsss\nttt\nuuu\nvvv\nwww\nxxx\nyyy\nzzz\n", wantEqual: false, reason: "avoid triple-quote syntax due to presence of ambiguous triple quotes", }, { label: label + "/AvoidTripleQuoteAmbiguousEllipsis", x: "aaa\nbbb\nccc\n...\neee\nfff\nggg\nhhh\niii\njjj\nkkk\nlll\nmmm\nnnn\nooo\nppp\nqqq\nRRR\nsss\nttt\nuuu\nvvv\nwww\nxxx\nyyy\nzzz\n", y: "aaa\nbbb\nCCC\nddd\neee\nfff\nggg\nhhh\niii\njjj\nkkk\nlll\nmmm\nnnn\nooo\nppp\nqqq\nrrr\nsss\nttt\nuuu\nvvv\nwww\nxxx\nyyy\nzzz\n", wantEqual: false, reason: "avoid triple-quote syntax due to presence of ambiguous ellipsis", }, { label: label + "/AvoidTripleQuoteNonPrintable", x: "aaa\nbbb\nccc\nddd\neee\nfff\nggg\nhhh\niii\njjj\nkkk\nlll\nmmm\nnnn\nooo\nppp\nqqq\nRRR\nsss\nttt\nuuu\nvvv\nwww\nxxx\nyyy\nzzz\n", y: "aaa\nbbb\nCCC\nddd\neee\nfff\nggg\nhhh\niii\njjj\nkkk\nlll\nmmm\nnnn\no\roo\nppp\nqqq\nrrr\nsss\nttt\nuuu\nvvv\nwww\nxxx\nyyy\nzzz\n", wantEqual: false, reason: "use triple-quote syntax", }, { label: label + "/AvoidTripleQuoteIdenticalWhitespace", x: "aaa\nbbb\nccc\n ddd\neee\nfff\nggg\nhhh\niii\njjj\nkkk\nlll\nmmm\nnnn\nooo\nppp\nqqq\nRRR\nsss\nttt\nuuu\nvvv\nwww\nxxx\nyyy\nzzz\n", y: "aaa\nbbb\nccc \nddd\neee\nfff\nggg\nhhh\niii\njjj\nkkk\nlll\nmmm\nnnn\nooo\nppp\nqqq\nrrr\nsss\nttt\nuuu\nvvv\nwww\nxxx\nyyy\nzzz\n", wantEqual: false, reason: "avoid triple-quote syntax due to visual equivalence of differences", }, { label: label + "/TripleQuoteStringer", x: []fmt.Stringer{ bytes.NewBuffer([]byte("package main\n\nimport 
(\n\t\"fmt\"\n)\n\nfunc main() {\n\tfmt.Println(\"Hello, playground\")\n}\n")), bytes.NewBuffer([]byte("package main\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n)\n\nfunc main() {\n\tfmt.Println(\"My favorite number is\", rand.Intn(10))\n}\n")), }, y: []fmt.Stringer{ bytes.NewBuffer([]byte("package main\n\nimport (\n\t\"fmt\"\n)\n\nfunc main() {\n\tfmt.Println(\"Hello, playground\")\n}\n")), bytes.NewBuffer([]byte("package main\n\nimport (\n\t\"fmt\"\n\t\"math\"\n)\n\nfunc main() {\n\tfmt.Printf(\"Now you have %g problems.\\n\", math.Sqrt(7))\n}\n")), }, opts: []cmp.Option{cmp.Comparer(func(x, y fmt.Stringer) bool { return x.String() == y.String() })}, wantEqual: false, reason: "multi-line String output should be formatted with triple quote", }, { label: label + "/LimitMaximumBytesDiffs", x: []byte("\xcd====\x06\x1f\xc2\xcc\xc2-S=====\x1d\xdfa\xae\x98\x9fH======ǰ\xb7=======\xef====:\\\x94\xe6J\xc7=====\xb4======\n\n\xf7\x94===========\xf2\x9c\xc0f=====4\xf6\xf1\xc3\x17\x82======n\x16`\x91D\xc6\x06=======\x1cE====.===========\xc4\x18=======\x8a\x8d\x0e====\x87\xb1\xa5\x8e\xc3=====z\x0f1\xaeU======G,=======5\xe75\xee\x82\xf4\xce====\x11r===========\xaf]=======z\x05\xb3\x91\x88%\xd2====\n1\x89=====i\xb7\x055\xe6\x81\xd2=============\x883=@̾====\x14\x05\x96%^t\x04=====\xe7Ȉ\x90\x1d============="), y: []byte("\\====|\x96\xe7SB\xa0\xab=====\xf0\xbd\xa5q\xab\x17;======\xabP\x00=======\xeb====\xa5\x14\xe6O(\xe4=====(======/c@?===========\xd9x\xed\x13=====J\xfc\x918B\x8d======a8A\xebs\x04\xae=======\aC====\x1c===========\x91\"=======uؾ====s\xec\x845\a=====;\xabS9t======\x1f\x1b=======\x80\xab/\xed+:;====\xeaI===========\xabl=======\xb9\xe9\xfdH\x93\x8e\u007f====ח\xe5=====Ig\x88m\xf5\x01V=============\xf7+4\xb0\x92E====\x9fj\xf8&\xd0h\xf9=====\xeeΨ\r\xbf============="), wantEqual: false, reason: "total bytes difference output is truncated due to excessive number of differences", }, { label: label + "/LimitMaximumStringDiffs", x: "a\nb\nc\nd\ne\nf\ng\nh\ni\nj\nk\nl\nm\nn\no\np\nq\nr\ns\nt\nu\nv\nw\nx\ny\nz\nA\nB\nC\nD\nE\nF\nG\nH\nI\nJ\nK\nL\nM\nN\nO\nP\nQ\nR\nS\nT\nU\nV\nW\nX\nY\nZ\n", y: "aa\nb\ncc\nd\nee\nf\ngg\nh\nii\nj\nkk\nl\nmm\nn\noo\np\nqq\nr\nss\nt\nuu\nv\nww\nx\nyy\nz\nAA\nB\nCC\nD\nEE\nF\nGG\nH\nII\nJ\nKK\nL\nMM\nN\nOO\nP\nQQ\nR\nSS\nT\nUU\nV\nWW\nX\nYY\nZ\n", wantEqual: false, reason: "total string difference output is truncated due to excessive number of differences", }, { label: label + "/LimitMaximumSliceDiffs", x: func() (out []struct{ S string }) { for _, s := range strings.Split("a\nb\nc\nd\ne\nf\ng\nh\ni\nj\nk\nl\nm\nn\no\np\nq\nr\ns\nt\nu\nv\nw\nx\ny\nz\nA\nB\nC\nD\nE\nF\nG\nH\nI\nJ\nK\nL\nM\nN\nO\nP\nQ\nR\nS\nT\nU\nV\nW\nX\nY\nZ\n", "\n") { out = append(out, struct{ S string }{s}) } return out }(), y: func() (out []struct{ S string }) { for _, s := range strings.Split("aa\nb\ncc\nd\nee\nf\ngg\nh\nii\nj\nkk\nl\nmm\nn\noo\np\nqq\nr\nss\nt\nuu\nv\nww\nx\nyy\nz\nAA\nB\nCC\nD\nEE\nF\nGG\nH\nII\nJ\nKK\nL\nMM\nN\nOO\nP\nQQ\nR\nSS\nT\nUU\nV\nWW\nX\nYY\nZ\n", "\n") { out = append(out, struct{ S string }{s}) } return out }(), wantEqual: false, reason: "total slice difference output is truncated due to excessive number of differences", }, { label: label + "/MultilineString", x: MyComposite{ StringA: strings.TrimPrefix(` Package cmp determines equality of values. This package is intended to be a more powerful and safer alternative to reflect.DeepEqual for comparing whether two values are semantically equal. 
The primary features of cmp are: • When the default behavior of equality does not suit the needs of the test, custom equality functions can override the equality operation. For example, an equality function may report floats as equal so long as they are within some tolerance of each other. • Types that have an Equal method may use that method to determine equality. This allows package authors to determine the equality operation for the types that they define. • If no custom equality functions are used and no Equal method is defined, equality is determined by recursively comparing the primitive kinds on both values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported fields are not compared by default; they result in panics unless suppressed by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly compared using the AllowUnexported option. `, "\n"), }, y: MyComposite{ StringA: strings.TrimPrefix(` Package cmp determines equality of value. This package is intended to be a more powerful and safer alternative to reflect.DeepEqual for comparing whether two values are semantically equal. The primary features of cmp are: • When the default behavior of equality does not suit the needs of the test, custom equality functions can override the equality operation. For example, an equality function may report floats as equal so long as they are within some tolerance of each other. • If no custom equality functions are used and no Equal method is defined, equality is determined by recursively comparing the primitive kinds on both values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported fields are not compared by default; they result in panics unless suppressed by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly compared using the AllowUnexported option.`, "\n"), }, wantEqual: false, reason: "batched per-line diff desired since string looks like multi-line textual data", }, { label: label + "/Slices", x: MyComposite{ BytesA: []byte{1, 2, 3}, BytesB: []MyByte{4, 5, 6}, BytesC: MyBytes{7, 8, 9}, IntsA: []int8{-1, -2, -3}, IntsB: []MyInt{-4, -5, -6}, IntsC: MyInts{-7, -8, -9}, UintsA: []uint16{1000, 2000, 3000}, UintsB: []MyUint{4000, 5000, 6000}, UintsC: MyUints{7000, 8000, 9000}, FloatsA: []float32{1.5, 2.5, 3.5}, FloatsB: []MyFloat{4.5, 5.5, 6.5}, FloatsC: MyFloats{7.5, 8.5, 9.5}, }, y: MyComposite{}, wantEqual: false, reason: "batched diffing for non-nil slices and nil slices", }, { label: label + "/EmptySlices", x: MyComposite{ BytesA: []byte{}, BytesB: []MyByte{}, BytesC: MyBytes{}, IntsA: []int8{}, IntsB: []MyInt{}, IntsC: MyInts{}, UintsA: []uint16{}, UintsB: []MyUint{}, UintsC: MyUints{}, FloatsA: []float32{}, FloatsB: []MyFloat{}, FloatsC: MyFloats{}, }, y: MyComposite{}, wantEqual: false, reason: "batched diffing for empty slices and nil slices", }, { label: label + "/LargeMapKey", x: map[*[]byte]int{func() *[]byte { b := make([]byte, 1<<20) return &b }(): 0}, y: map[*[]byte]int{func() *[]byte { b := make([]byte, 1<<20) return &b }(): 0}, reason: "printing map keys should have some verbosity limit imposed", }, { label: label + "/LargeStringInInterface", x: struct{ X interface{} }{"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nam sit amet pretium ligula, at gravida quam. Integer iaculis, velit at sagittis ultricies, lacus metus scelerisque turpis, ornare feugiat nulla nisl ac erat. Maecenas elementum ultricies libero, sed efficitur lacus molestie non. Nulla ac pretium dolor. 
Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Pellentesque mi lorem, consectetur id porttitor id, sollicitudin sit amet enim. Duis eu dolor magna. Nunc ut augue turpis."}, y: struct{ X interface{} }{"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nam sit amet pretium ligula, at gravida quam. Integer iaculis, velit at sagittis ultricies, lacus metus scelerisque turpis, ornare feugiat nulla nisl ac erat. Maecenas elementum ultricies libero, sed efficitur lacus molestie non. Nulla ac pretium dolor. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Pellentesque mi lorem, consectetur id porttitor id, sollicitudin sit amet enim. Duis eu dolor magna. Nunc ut augue turpis,"}, reason: "strings within an interface should benefit from specialized diffing", }, { label: label + "/LargeBytesInInterface", x: struct{ X interface{} }{[]byte("Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nam sit amet pretium ligula, at gravida quam. Integer iaculis, velit at sagittis ultricies, lacus metus scelerisque turpis, ornare feugiat nulla nisl ac erat. Maecenas elementum ultricies libero, sed efficitur lacus molestie non. Nulla ac pretium dolor. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Pellentesque mi lorem, consectetur id porttitor id, sollicitudin sit amet enim. Duis eu dolor magna. Nunc ut augue turpis.")}, y: struct{ X interface{} }{[]byte("Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nam sit amet pretium ligula, at gravida quam. Integer iaculis, velit at sagittis ultricies, lacus metus scelerisque turpis, ornare feugiat nulla nisl ac erat. Maecenas elementum ultricies libero, sed efficitur lacus molestie non. Nulla ac pretium dolor. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Pellentesque mi lorem, consectetur id porttitor id, sollicitudin sit amet enim. Duis eu dolor magna. Nunc ut augue turpis,")}, reason: "bytes slice within an interface should benefit from specialized diffing", }, { label: label + "/LargeStandaloneString", x: struct{ X interface{} }{[1]string{"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nam sit amet pretium ligula, at gravida quam. Integer iaculis, velit at sagittis ultricies, lacus metus scelerisque turpis, ornare feugiat nulla nisl ac erat. Maecenas elementum ultricies libero, sed efficitur lacus molestie non. Nulla ac pretium dolor. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Pellentesque mi lorem, consectetur id porttitor id, sollicitudin sit amet enim. Duis eu dolor magna. Nunc ut augue turpis."}}, y: struct{ X interface{} }{[1]string{"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nam sit amet pretium ligula, at gravida quam. Integer iaculis, velit at sagittis ultricies, lacus metus scelerisque turpis, ornare feugiat nulla nisl ac erat. Maecenas elementum ultricies libero, sed efficitur lacus molestie non. Nulla ac pretium dolor. 
Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Pellentesque mi lorem, consectetur id porttitor id, sollicitudin sit amet enim. Duis eu dolor magna. Nunc ut augue turpis,"}}, reason: "printing a large standalone string that is different should print enough context to see the difference", }, { label: label + "/SurroundingEqualElements", x: "org-4747474747474747,bucket-4242424242424242:m,tag1=a,tag2=aa,#=_value _value=2 11\torg-4747474747474747,bucket-4242424242424242:m,tag1=a,tag2=bb,#=_value _value=2 21\torg-4747474747474747,bucket-4242424242424242:m,tag1=b,tag2=cc,#=_value _value=1 21\torg-4747474747474747,bucket-4242424242424242:m,tag1=a,tag2=dd,#=_value _value=3 31\torg-4747474747474747,bucket-4242424242424242:m,tag1=c,#=_value _value=4 41\t", y: "org-4747474747474747,bucket-4242424242424242:m,tag1=a,tag2=aa _value=2 11\torg-4747474747474747,bucket-4242424242424242:m,tag1=a,tag2=bb _value=2 21\torg-4747474747474747,bucket-4242424242424242:m,tag1=b,tag2=cc _value=1 21\torg-4747474747474747,bucket-4242424242424242:m,tag1=a,tag2=dd _value=3 31\torg-4747474747474747,bucket-4242424242424242:m,tag1=c _value=4 41\t", reason: "leading/trailing equal spans should not appear in diff lines", }, { label: label + "/MostlyTextString", x: "org-4747474747474747,bucket-4242424242424242:m,tag1=a,tag2=aa,\xff=_value _value=2 11\norg-4747474747474747,bucket-4242424242424242:m,tag1=a,tag2=bb,\xff=_value _value=2 21\norg-4747474747474747,bucket-4242424242424242:m,tag1=b,tag2=cc,\xff=_value _value=1 21\norg-4747474747474747,bucket-4242424242424242:m,tag1=a,tag2=dd,\xff=_value _value=3 31\norg-4747474747474747,bucket-4242424242424242:m,tag1=c,\xff=_value _value=4 41\n", y: "org-4747474747474747,bucket-4242424242424242:m,tag1=a,tag2=aa _value=2 11\norg-4747474747474747,bucket-4242424242424242:m,tag1=a,tag2=bb _value=2 21\norg-4747474747474747,bucket-4242424242424242:m,tag1=b,tag2=cc _value=1 21\norg-4747474747474747,bucket-4242424242424242:m,tag1=a,tag2=dd _value=3 31\norg-4747474747474747,bucket-4242424242424242:m,tag1=c _value=4 41\n", reason: "the presence of a few invalid UTF-8 characters should not prevent printing this as text", }, { label: label + "/AllLinesDiffer", x: "d5c14bdf6bac81c27afc5429500ed750\n25483503b557c606dad4f144d27ae10b\n90bdbcdbb6ea7156068e3dcfb7459244\n978f480a6e3cced51e297fbff9a506b7\n", y: "Xd5c14bdf6bac81c27afc5429500ed750\nX25483503b557c606dad4f144d27ae10b\nX90bdbcdbb6ea7156068e3dcfb7459244\nX978f480a6e3cced51e297fbff9a506b7\n", reason: "all lines are different, so diffing based on lines is pointless", }, { label: label + "/StringifiedBytes", x: struct{ X []byte }{[]byte("hello, world!")}, y: struct{ X []byte }{}, reason: "[]byte should be printed as text since it is printable text", }, { label: label + "/NonStringifiedBytes", x: struct{ X []byte }{[]byte("\xde\xad\xbe\xef")}, y: struct{ X []byte }{}, reason: "[]byte should not be printed as text since it is binary data", }, { label: label + "/StringifiedNamedBytes", x: struct{ X MyBytes }{MyBytes("hello, world!")}, y: struct{ X MyBytes }{}, reason: "MyBytes should be printed as text since it is printable text", }, { label: label + "/NonStringifiedNamedBytes", x: struct{ X MyBytes }{MyBytes("\xde\xad\xbe\xef")}, y: struct{ X MyBytes }{}, reason: "MyBytes should not be printed as text since it is binary data", }, { label: label + "/ShortJSON", x: `{ "id": 1, "foo": true, "bar": true, }`, y: `{ "id": 
1434180, "foo": true, "bar": true, }`, reason: "short multiline JSON should prefer triple-quoted string diff as it is more readable", }, { label: label + "/PointerToStringOrAny", x: func() *string { var v string = "hello" return &v }(), y: func() *interface{} { var v interface{} = "hello" return &v }(), reason: "mismatched types between any and *any should print differently", }, { label: label + "/NamedPointer", x: func() *string { v := "hello" return &v }(), y: func() PointerString { v := "hello" return &v }(), reason: "mismatched pointer types should print differently", }, { label: label + "/MapStringAny", x: map[string]interface{}{"key": int(0)}, y: map[string]interface{}{"key": uint(0)}, reason: "mismatched underlying value within interface", }, { label: label + "/StructFieldAny", x: struct{ X interface{} }{int(0)}, y: struct{ X interface{} }{uint(0)}, reason: "mismatched underlying value within interface", }, { label: label + "/SliceOfBytesText", x: [][]byte{ []byte("hello"), []byte("foo"), []byte("barbaz"), []byte("blahdieblah"), }, y: [][]byte{ []byte("foo"), []byte("foo"), []byte("barbaz"), []byte("added"), []byte("here"), []byte("hrmph"), }, reason: "should print text byte slices as strings", }, { label: label + "/SliceOfBytesBinary", x: [][]byte{ []byte("\xde\xad\xbe\xef"), []byte("\xffoo"), []byte("barbaz"), []byte("blahdieblah"), }, y: [][]byte{ []byte("\xffoo"), []byte("foo"), []byte("barbaz"), []byte("added"), []byte("here"), []byte("hrmph\xff"), }, reason: "should print text byte slices as strings except those with binary", }, { label: label + "/ManyEscapeCharacters", x: `[ {"Base32": "NA======"}, {"Base32": "NBSQ===="}, {"Base32": "NBSWY==="}, {"Base32": "NBSWY3A="}, {"Base32": "NBSWY3DP"} ]`, y: `[ {"Base32": "NB======"}, {"Base32": "NBSQ===="}, {"Base32": "NBSWY==="}, {"Base32": "NBSWY3A="}, {"Base32": "NBSWY3DP"} ]`, reason: "should use line-based diffing since byte-based diffing is unreadable due to heavy amounts of escaping", }} } func embeddedTests() []test { const label = "EmbeddedStruct" privateStruct := *new(ts.ParentStructA).PrivateStruct() createStructA := func(i int) ts.ParentStructA { s := ts.ParentStructA{} s.PrivateStruct().Public = 1 + i s.PrivateStruct().SetPrivate(2 + i) return s } createStructB := func(i int) ts.ParentStructB { s := ts.ParentStructB{} s.PublicStruct.Public = 1 + i s.PublicStruct.SetPrivate(2 + i) return s } createStructC := func(i int) ts.ParentStructC { s := ts.ParentStructC{} s.PrivateStruct().Public = 1 + i s.PrivateStruct().SetPrivate(2 + i) s.Public = 3 + i s.SetPrivate(4 + i) return s } createStructD := func(i int) ts.ParentStructD { s := ts.ParentStructD{} s.PublicStruct.Public = 1 + i s.PublicStruct.SetPrivate(2 + i) s.Public = 3 + i s.SetPrivate(4 + i) return s } createStructE := func(i int) ts.ParentStructE { s := ts.ParentStructE{} s.PrivateStruct().Public = 1 + i s.PrivateStruct().SetPrivate(2 + i) s.PublicStruct.Public = 3 + i s.PublicStruct.SetPrivate(4 + i) return s } createStructF := func(i int) ts.ParentStructF { s := ts.ParentStructF{} s.PrivateStruct().Public = 1 + i s.PrivateStruct().SetPrivate(2 + i) s.PublicStruct.Public = 3 + i s.PublicStruct.SetPrivate(4 + i) s.Public = 5 + i s.SetPrivate(6 + i) return s } createStructG := func(i int) *ts.ParentStructG { s := ts.NewParentStructG() s.PrivateStruct().Public = 1 + i s.PrivateStruct().SetPrivate(2 + i) return s } createStructH := func(i int) *ts.ParentStructH { s := ts.NewParentStructH() s.PublicStruct.Public = 1 + i s.PublicStruct.SetPrivate(2 + i) return s } 
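// Editorial sketch (not part of the upstream compare_test.go): every
// embedded-struct case below exercises the same rule, namely that cmp.Diff
// panics on unexported fields unless the caller opts in with
// cmp.AllowUnexported or opts out with cmpopts.IgnoreUnexported. With a
// hypothetical type such as:
//
//	type account struct {
//		Name    string
//		balance int // unexported
//	}
//
//	cmp.Diff(account{"a", 1}, account{"a", 2})                                      // panics: cannot handle unexported field
//	cmp.Diff(account{"a", 1}, account{"a", 2}, cmp.AllowUnexported(account{}))      // reports a diff on balance
//	cmp.Diff(account{"a", 1}, account{"a", 2}, cmpopts.IgnoreUnexported(account{})) // reports no difference
//
// The ParentStruct fixtures layer this behavior across embedded exported and
// unexported structs, which is why most cases must also allow or ignore the
// embedded PublicStruct/privateStruct types.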
createStructI := func(i int) *ts.ParentStructI { s := ts.NewParentStructI() s.PrivateStruct().Public = 1 + i s.PrivateStruct().SetPrivate(2 + i) s.PublicStruct.Public = 3 + i s.PublicStruct.SetPrivate(4 + i) return s } createStructJ := func(i int) *ts.ParentStructJ { s := ts.NewParentStructJ() s.PrivateStruct().Public = 1 + i s.PrivateStruct().SetPrivate(2 + i) s.PublicStruct.Public = 3 + i s.PublicStruct.SetPrivate(4 + i) s.Private().Public = 5 + i s.Private().SetPrivate(6 + i) s.Public.Public = 7 + i s.Public.SetPrivate(8 + i) return s } return []test{{ label: label + "/ParentStructA/PanicUnexported1", x: ts.ParentStructA{}, y: ts.ParentStructA{}, wantPanic: "cannot handle unexported field", reason: "ParentStructA has an unexported field", }, { label: label + "/ParentStructA/Ignored", x: ts.ParentStructA{}, y: ts.ParentStructA{}, opts: []cmp.Option{ cmpopts.IgnoreUnexported(ts.ParentStructA{}), }, wantEqual: true, reason: "the only field (which is unexported) of ParentStructA is ignored", }, { label: label + "/ParentStructA/PanicUnexported2", x: createStructA(0), y: createStructA(0), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructA{}), }, wantPanic: "cannot handle unexported field", reason: "privateStruct also has unexported fields", }, { label: label + "/ParentStructA/Equal", x: createStructA(0), y: createStructA(0), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructA{}, privateStruct), }, wantEqual: true, reason: "unexported fields of both ParentStructA and privateStruct are allowed", }, { label: label + "/ParentStructA/Inequal", x: createStructA(0), y: createStructA(1), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructA{}, privateStruct), }, wantEqual: false, reason: "the two values differ on some fields", }, { label: label + "/ParentStructB/PanicUnexported1", x: ts.ParentStructB{}, y: ts.ParentStructB{}, opts: []cmp.Option{ cmpopts.IgnoreUnexported(ts.ParentStructB{}), }, wantPanic: "cannot handle unexported field", reason: "PublicStruct has an unexported field", }, { label: label + "/ParentStructB/Ignored", x: ts.ParentStructB{}, y: ts.ParentStructB{}, opts: []cmp.Option{ cmpopts.IgnoreUnexported(ts.ParentStructB{}), cmpopts.IgnoreUnexported(ts.PublicStruct{}), }, wantEqual: true, reason: "unexported fields of both ParentStructB and PublicStruct are ignored", }, { label: label + "/ParentStructB/PanicUnexported2", x: createStructB(0), y: createStructB(0), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructB{}), }, wantPanic: "cannot handle unexported field", reason: "PublicStruct also has unexported fields", }, { label: label + "/ParentStructB/Equal", x: createStructB(0), y: createStructB(0), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructB{}, ts.PublicStruct{}), }, wantEqual: true, reason: "unexported fields of both ParentStructB and PublicStruct are allowed", }, { label: label + "/ParentStructB/Inequal", x: createStructB(0), y: createStructB(1), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructB{}, ts.PublicStruct{}), }, wantEqual: false, reason: "the two values differ on some fields", }, { label: label + "/ParentStructC/PanicUnexported1", x: ts.ParentStructC{}, y: ts.ParentStructC{}, wantPanic: "cannot handle unexported field", reason: "ParentStructC has unexported fields", }, { label: label + "/ParentStructC/Ignored", x: ts.ParentStructC{}, y: ts.ParentStructC{}, opts: []cmp.Option{ cmpopts.IgnoreUnexported(ts.ParentStructC{}), }, wantEqual: true, reason: "unexported fields of ParentStructC are ignored", }, { label: label + 
"/ParentStructC/PanicUnexported2", x: createStructC(0), y: createStructC(0), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructC{}), }, wantPanic: "cannot handle unexported field", reason: "privateStruct also has unexported fields", }, { label: label + "/ParentStructC/Equal", x: createStructC(0), y: createStructC(0), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructC{}, privateStruct), }, wantEqual: true, reason: "unexported fields of both ParentStructC and privateStruct are allowed", }, { label: label + "/ParentStructC/Inequal", x: createStructC(0), y: createStructC(1), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructC{}, privateStruct), }, wantEqual: false, reason: "the two values differ on some fields", }, { label: label + "/ParentStructD/PanicUnexported1", x: ts.ParentStructD{}, y: ts.ParentStructD{}, opts: []cmp.Option{ cmpopts.IgnoreUnexported(ts.ParentStructD{}), }, wantPanic: "cannot handle unexported field", reason: "ParentStructD has unexported fields", }, { label: label + "/ParentStructD/Ignored", x: ts.ParentStructD{}, y: ts.ParentStructD{}, opts: []cmp.Option{ cmpopts.IgnoreUnexported(ts.ParentStructD{}), cmpopts.IgnoreUnexported(ts.PublicStruct{}), }, wantEqual: true, reason: "unexported fields of ParentStructD and PublicStruct are ignored", }, { label: label + "/ParentStructD/PanicUnexported2", x: createStructD(0), y: createStructD(0), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructD{}), }, wantPanic: "cannot handle unexported field", reason: "PublicStruct also has unexported fields", }, { label: label + "/ParentStructD/Equal", x: createStructD(0), y: createStructD(0), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructD{}, ts.PublicStruct{}), }, wantEqual: true, reason: "unexported fields of both ParentStructD and PublicStruct are allowed", }, { label: label + "/ParentStructD/Inequal", x: createStructD(0), y: createStructD(1), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructD{}, ts.PublicStruct{}), }, wantEqual: false, reason: "the two values differ on some fields", }, { label: label + "/ParentStructE/PanicUnexported1", x: ts.ParentStructE{}, y: ts.ParentStructE{}, opts: []cmp.Option{ cmpopts.IgnoreUnexported(ts.ParentStructE{}), }, wantPanic: "cannot handle unexported field", reason: "ParentStructE has unexported fields", }, { label: label + "/ParentStructE/Ignored", x: ts.ParentStructE{}, y: ts.ParentStructE{}, opts: []cmp.Option{ cmpopts.IgnoreUnexported(ts.ParentStructE{}), cmpopts.IgnoreUnexported(ts.PublicStruct{}), }, wantEqual: true, reason: "unexported fields of ParentStructE and PublicStruct are ignored", }, { label: label + "/ParentStructE/PanicUnexported2", x: createStructE(0), y: createStructE(0), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructE{}), }, wantPanic: "cannot handle unexported field", reason: "PublicStruct and privateStruct also has unexported fields", }, { label: label + "/ParentStructE/PanicUnexported3", x: createStructE(0), y: createStructE(0), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructE{}, ts.PublicStruct{}), }, wantPanic: "cannot handle unexported field", reason: "privateStruct also has unexported fields", }, { label: label + "/ParentStructE/Equal", x: createStructE(0), y: createStructE(0), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructE{}, ts.PublicStruct{}, privateStruct), }, wantEqual: true, reason: "unexported fields of both ParentStructE, PublicStruct, and privateStruct are allowed", }, { label: label + "/ParentStructE/Inequal", x: createStructE(0), y: createStructE(1), 
opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructE{}, ts.PublicStruct{}, privateStruct), }, wantEqual: false, reason: "the two values differ on some fields", }, { label: label + "/ParentStructF/PanicUnexported1", x: ts.ParentStructF{}, y: ts.ParentStructF{}, opts: []cmp.Option{ cmpopts.IgnoreUnexported(ts.ParentStructF{}), }, wantPanic: "cannot handle unexported field", reason: "ParentStructF has unexported fields", }, { label: label + "/ParentStructF/Ignored", x: ts.ParentStructF{}, y: ts.ParentStructF{}, opts: []cmp.Option{ cmpopts.IgnoreUnexported(ts.ParentStructF{}), cmpopts.IgnoreUnexported(ts.PublicStruct{}), }, wantEqual: true, reason: "unexported fields of ParentStructF and PublicStruct are ignored", }, { label: label + "/ParentStructF/PanicUnexported2", x: createStructF(0), y: createStructF(0), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructF{}), }, wantPanic: "cannot handle unexported field", reason: "PublicStruct and privateStruct also has unexported fields", }, { label: label + "/ParentStructF/PanicUnexported3", x: createStructF(0), y: createStructF(0), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructF{}, ts.PublicStruct{}), }, wantPanic: "cannot handle unexported field", reason: "privateStruct also has unexported fields", }, { label: label + "/ParentStructF/Equal", x: createStructF(0), y: createStructF(0), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructF{}, ts.PublicStruct{}, privateStruct), }, wantEqual: true, reason: "unexported fields of both ParentStructF, PublicStruct, and privateStruct are allowed", }, { label: label + "/ParentStructF/Inequal", x: createStructF(0), y: createStructF(1), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructF{}, ts.PublicStruct{}, privateStruct), }, wantEqual: false, reason: "the two values differ on some fields", }, { label: label + "/ParentStructG/PanicUnexported1", x: ts.ParentStructG{}, y: ts.ParentStructG{}, wantPanic: "cannot handle unexported field", reason: "ParentStructG has unexported fields", }, { label: label + "/ParentStructG/Ignored", x: ts.ParentStructG{}, y: ts.ParentStructG{}, opts: []cmp.Option{ cmpopts.IgnoreUnexported(ts.ParentStructG{}), }, wantEqual: true, reason: "unexported fields of ParentStructG are ignored", }, { label: label + "/ParentStructG/PanicUnexported2", x: createStructG(0), y: createStructG(0), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructG{}), }, wantPanic: "cannot handle unexported field", reason: "privateStruct also has unexported fields", }, { label: label + "/ParentStructG/Equal", x: createStructG(0), y: createStructG(0), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructG{}, privateStruct), }, wantEqual: true, reason: "unexported fields of both ParentStructG and privateStruct are allowed", }, { label: label + "/ParentStructG/Inequal", x: createStructG(0), y: createStructG(1), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructG{}, privateStruct), }, wantEqual: false, reason: "the two values differ on some fields", }, { label: label + "/ParentStructH/EqualNil", x: ts.ParentStructH{}, y: ts.ParentStructH{}, wantEqual: true, reason: "PublicStruct is not compared because the pointer is nil", }, { label: label + "/ParentStructH/PanicUnexported1", x: createStructH(0), y: createStructH(0), wantPanic: "cannot handle unexported field", reason: "PublicStruct has unexported fields", }, { label: label + "/ParentStructH/Ignored", x: ts.ParentStructH{}, y: ts.ParentStructH{}, opts: []cmp.Option{ cmpopts.IgnoreUnexported(ts.ParentStructH{}), }, wantEqual: true, 
reason: "unexported fields of ParentStructH are ignored (it has none)", }, { label: label + "/ParentStructH/PanicUnexported2", x: createStructH(0), y: createStructH(0), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructH{}), }, wantPanic: "cannot handle unexported field", reason: "PublicStruct also has unexported fields", }, { label: label + "/ParentStructH/Equal", x: createStructH(0), y: createStructH(0), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructH{}, ts.PublicStruct{}), }, wantEqual: true, reason: "unexported fields of both ParentStructH and PublicStruct are allowed", }, { label: label + "/ParentStructH/Inequal", x: createStructH(0), y: createStructH(1), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructH{}, ts.PublicStruct{}), }, wantEqual: false, reason: "the two values differ on some fields", }, { label: label + "/ParentStructI/PanicUnexported1", x: ts.ParentStructI{}, y: ts.ParentStructI{}, wantPanic: "cannot handle unexported field", reason: "ParentStructI has unexported fields", }, { label: label + "/ParentStructI/Ignored1", x: ts.ParentStructI{}, y: ts.ParentStructI{}, opts: []cmp.Option{ cmpopts.IgnoreUnexported(ts.ParentStructI{}), }, wantEqual: true, reason: "unexported fields of ParentStructI are ignored", }, { label: label + "/ParentStructI/PanicUnexported2", x: createStructI(0), y: createStructI(0), opts: []cmp.Option{ cmpopts.IgnoreUnexported(ts.ParentStructI{}), }, wantPanic: "cannot handle unexported field", reason: "PublicStruct and privateStruct also has unexported fields", }, { label: label + "/ParentStructI/Ignored2", x: createStructI(0), y: createStructI(0), opts: []cmp.Option{ cmpopts.IgnoreUnexported(ts.ParentStructI{}, ts.PublicStruct{}), }, wantEqual: true, reason: "unexported fields of ParentStructI and PublicStruct are ignored", }, { label: label + "/ParentStructI/PanicUnexported3", x: createStructI(0), y: createStructI(0), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructI{}), }, wantPanic: "cannot handle unexported field", reason: "PublicStruct and privateStruct also has unexported fields", }, { label: label + "/ParentStructI/Equal", x: createStructI(0), y: createStructI(0), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructI{}, ts.PublicStruct{}, privateStruct), }, wantEqual: true, reason: "unexported fields of both ParentStructI, PublicStruct, and privateStruct are allowed", }, { label: label + "/ParentStructI/Inequal", x: createStructI(0), y: createStructI(1), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructI{}, ts.PublicStruct{}, privateStruct), }, wantEqual: false, reason: "the two values differ on some fields", }, { label: label + "/ParentStructJ/PanicUnexported1", x: ts.ParentStructJ{}, y: ts.ParentStructJ{}, wantPanic: "cannot handle unexported field", reason: "ParentStructJ has unexported fields", }, { label: label + "/ParentStructJ/PanicUnexported2", x: ts.ParentStructJ{}, y: ts.ParentStructJ{}, opts: []cmp.Option{ cmpopts.IgnoreUnexported(ts.ParentStructJ{}), }, wantPanic: "cannot handle unexported field", reason: "PublicStruct and privateStruct also has unexported fields", }, { label: label + "/ParentStructJ/Ignored", x: ts.ParentStructJ{}, y: ts.ParentStructJ{}, opts: []cmp.Option{ cmpopts.IgnoreUnexported(ts.ParentStructJ{}, ts.PublicStruct{}), }, wantEqual: true, reason: "unexported fields of ParentStructJ and PublicStruct are ignored", }, { label: label + "/ParentStructJ/PanicUnexported3", x: createStructJ(0), y: createStructJ(0), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructJ{}, 
ts.PublicStruct{}), }, wantPanic: "cannot handle unexported field", reason: "privateStruct also has unexported fields", }, { label: label + "/ParentStructJ/Equal", x: createStructJ(0), y: createStructJ(0), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructJ{}, ts.PublicStruct{}, privateStruct), }, wantEqual: true, reason: "unexported fields of both ParentStructJ, PublicStruct, and privateStruct are allowed", }, { label: label + "/ParentStructJ/Inequal", x: createStructJ(0), y: createStructJ(1), opts: []cmp.Option{ cmp.AllowUnexported(ts.ParentStructJ{}, ts.PublicStruct{}, privateStruct), }, wantEqual: false, reason: "the two values differ on some fields", }} } func methodTests() []test { const label = "EqualMethod" // A common mistake that the Equal method is on a pointer receiver, // but only a non-pointer value is present in the struct. // A transform can be used to forcibly reference the value. addrTransform := cmp.FilterPath(func(p cmp.Path) bool { if len(p) == 0 { return false } t := p[len(p)-1].Type() if _, ok := t.MethodByName("Equal"); ok || t.Kind() == reflect.Ptr { return false } if m, ok := reflect.PtrTo(t).MethodByName("Equal"); ok { tf := m.Func.Type() return !tf.IsVariadic() && tf.NumIn() == 2 && tf.NumOut() == 1 && tf.In(0).AssignableTo(tf.In(1)) && tf.Out(0) == reflect.TypeOf(true) } return false }, cmp.Transformer("Addr", func(x interface{}) interface{} { v := reflect.ValueOf(x) vp := reflect.New(v.Type()) vp.Elem().Set(v) return vp.Interface() })) // For each of these types, there is an Equal method defined, which always // returns true, while the underlying data are fundamentally different. // Since the method should be called, these are expected to be equal. return []test{{ label: label + "/StructA/ValueEqual", x: ts.StructA{X: "NotEqual"}, y: ts.StructA{X: "not_equal"}, wantEqual: true, reason: "Equal method on StructA value called", }, { label: label + "/StructA/PointerEqual", x: &ts.StructA{X: "NotEqual"}, y: &ts.StructA{X: "not_equal"}, wantEqual: true, reason: "Equal method on StructA pointer called", }, { label: label + "/StructB/ValueInequal", x: ts.StructB{X: "NotEqual"}, y: ts.StructB{X: "not_equal"}, wantEqual: false, reason: "Equal method on StructB value not called", }, { label: label + "/StructB/ValueAddrEqual", x: ts.StructB{X: "NotEqual"}, y: ts.StructB{X: "not_equal"}, opts: []cmp.Option{addrTransform}, wantEqual: true, reason: "Equal method on StructB pointer called due to shallow copy transform", }, { label: label + "/StructB/PointerEqual", x: &ts.StructB{X: "NotEqual"}, y: &ts.StructB{X: "not_equal"}, wantEqual: true, reason: "Equal method on StructB pointer called", }, { label: label + "/StructC/ValueEqual", x: ts.StructC{X: "NotEqual"}, y: ts.StructC{X: "not_equal"}, wantEqual: true, reason: "Equal method on StructC value called", }, { label: label + "/StructC/PointerEqual", x: &ts.StructC{X: "NotEqual"}, y: &ts.StructC{X: "not_equal"}, wantEqual: true, reason: "Equal method on StructC pointer called", }, { label: label + "/StructD/ValueInequal", x: ts.StructD{X: "NotEqual"}, y: ts.StructD{X: "not_equal"}, wantEqual: false, reason: "Equal method on StructD value not called", }, { label: label + "/StructD/ValueAddrEqual", x: ts.StructD{X: "NotEqual"}, y: ts.StructD{X: "not_equal"}, opts: []cmp.Option{addrTransform}, wantEqual: true, reason: "Equal method on StructD pointer called due to shallow copy transform", }, { label: label + "/StructD/PointerEqual", x: &ts.StructD{X: "NotEqual"}, y: &ts.StructD{X: "not_equal"}, wantEqual: true, reason: 
"Equal method on StructD pointer called", }, { label: label + "/StructE/ValueInequal", x: ts.StructE{X: "NotEqual"}, y: ts.StructE{X: "not_equal"}, wantEqual: false, reason: "Equal method on StructE value not called", }, { label: label + "/StructE/ValueAddrEqual", x: ts.StructE{X: "NotEqual"}, y: ts.StructE{X: "not_equal"}, opts: []cmp.Option{addrTransform}, wantEqual: true, reason: "Equal method on StructE pointer called due to shallow copy transform", }, { label: label + "/StructE/PointerEqual", x: &ts.StructE{X: "NotEqual"}, y: &ts.StructE{X: "not_equal"}, wantEqual: true, reason: "Equal method on StructE pointer called", }, { label: label + "/StructF/ValueInequal", x: ts.StructF{X: "NotEqual"}, y: ts.StructF{X: "not_equal"}, wantEqual: false, reason: "Equal method on StructF value not called", }, { label: label + "/StructF/PointerEqual", x: &ts.StructF{X: "NotEqual"}, y: &ts.StructF{X: "not_equal"}, wantEqual: true, reason: "Equal method on StructF pointer called", }, { label: label + "/StructA1/ValueEqual", x: ts.StructA1{StructA: ts.StructA{X: "NotEqual"}, X: "equal"}, y: ts.StructA1{StructA: ts.StructA{X: "not_equal"}, X: "equal"}, wantEqual: true, reason: "Equal method on StructA value called with equal X field", }, { label: label + "/StructA1/ValueInequal", x: ts.StructA1{StructA: ts.StructA{X: "NotEqual"}, X: "NotEqual"}, y: ts.StructA1{StructA: ts.StructA{X: "not_equal"}, X: "not_equal"}, wantEqual: false, reason: "Equal method on StructA value called, but inequal X field", }, { label: label + "/StructA1/PointerEqual", x: &ts.StructA1{StructA: ts.StructA{X: "NotEqual"}, X: "equal"}, y: &ts.StructA1{StructA: ts.StructA{X: "not_equal"}, X: "equal"}, wantEqual: true, reason: "Equal method on StructA value called with equal X field", }, { label: label + "/StructA1/PointerInequal", x: &ts.StructA1{StructA: ts.StructA{X: "NotEqual"}, X: "NotEqual"}, y: &ts.StructA1{StructA: ts.StructA{X: "not_equal"}, X: "not_equal"}, wantEqual: false, reason: "Equal method on StructA value called, but inequal X field", }, { label: label + "/StructB1/ValueEqual", x: ts.StructB1{StructB: ts.StructB{X: "NotEqual"}, X: "equal"}, y: ts.StructB1{StructB: ts.StructB{X: "not_equal"}, X: "equal"}, opts: []cmp.Option{addrTransform}, wantEqual: true, reason: "Equal method on StructB pointer called due to shallow copy transform with equal X field", }, { label: label + "/StructB1/ValueInequal", x: ts.StructB1{StructB: ts.StructB{X: "NotEqual"}, X: "NotEqual"}, y: ts.StructB1{StructB: ts.StructB{X: "not_equal"}, X: "not_equal"}, opts: []cmp.Option{addrTransform}, wantEqual: false, reason: "Equal method on StructB pointer called due to shallow copy transform, but inequal X field", }, { label: label + "/StructB1/PointerEqual", x: &ts.StructB1{StructB: ts.StructB{X: "NotEqual"}, X: "equal"}, y: &ts.StructB1{StructB: ts.StructB{X: "not_equal"}, X: "equal"}, opts: []cmp.Option{addrTransform}, wantEqual: true, reason: "Equal method on StructB pointer called due to shallow copy transform with equal X field", }, { label: label + "/StructB1/PointerInequal", x: &ts.StructB1{StructB: ts.StructB{X: "NotEqual"}, X: "NotEqual"}, y: &ts.StructB1{StructB: ts.StructB{X: "not_equal"}, X: "not_equal"}, opts: []cmp.Option{addrTransform}, wantEqual: false, reason: "Equal method on StructB pointer called due to shallow copy transform, but inequal X field", }, { label: label + "/StructC1/ValueEqual", x: ts.StructC1{StructC: ts.StructC{X: "NotEqual"}, X: "NotEqual"}, y: ts.StructC1{StructC: ts.StructC{X: "not_equal"}, X: "not_equal"}, 
wantEqual: true, reason: "Equal method on StructC1 value called", }, { label: label + "/StructC1/PointerEqual", x: &ts.StructC1{StructC: ts.StructC{X: "NotEqual"}, X: "NotEqual"}, y: &ts.StructC1{StructC: ts.StructC{X: "not_equal"}, X: "not_equal"}, wantEqual: true, reason: "Equal method on StructC1 pointer called", }, { label: label + "/StructD1/ValueInequal", x: ts.StructD1{StructD: ts.StructD{X: "NotEqual"}, X: "NotEqual"}, y: ts.StructD1{StructD: ts.StructD{X: "not_equal"}, X: "not_equal"}, wantEqual: false, reason: "Equal method on StructD1 value not called", }, { label: label + "/StructD1/PointerAddrEqual", x: ts.StructD1{StructD: ts.StructD{X: "NotEqual"}, X: "NotEqual"}, y: ts.StructD1{StructD: ts.StructD{X: "not_equal"}, X: "not_equal"}, opts: []cmp.Option{addrTransform}, wantEqual: true, reason: "Equal method on StructD1 pointer called due to shallow copy transform", }, { label: label + "/StructD1/PointerEqual", x: &ts.StructD1{StructD: ts.StructD{X: "NotEqual"}, X: "NotEqual"}, y: &ts.StructD1{StructD: ts.StructD{X: "not_equal"}, X: "not_equal"}, wantEqual: true, reason: "Equal method on StructD1 pointer called", }, { label: label + "/StructE1/ValueInequal", x: ts.StructE1{StructE: ts.StructE{X: "NotEqual"}, X: "NotEqual"}, y: ts.StructE1{StructE: ts.StructE{X: "not_equal"}, X: "not_equal"}, wantEqual: false, reason: "Equal method on StructE1 value not called", }, { label: label + "/StructE1/ValueAddrEqual", x: ts.StructE1{StructE: ts.StructE{X: "NotEqual"}, X: "NotEqual"}, y: ts.StructE1{StructE: ts.StructE{X: "not_equal"}, X: "not_equal"}, opts: []cmp.Option{addrTransform}, wantEqual: true, reason: "Equal method on StructE1 pointer called due to shallow copy transform", }, { label: label + "/StructE1/PointerEqual", x: &ts.StructE1{StructE: ts.StructE{X: "NotEqual"}, X: "NotEqual"}, y: &ts.StructE1{StructE: ts.StructE{X: "not_equal"}, X: "not_equal"}, wantEqual: true, reason: "Equal method on StructE1 pointer called", }, { label: label + "/StructF1/ValueInequal", x: ts.StructF1{StructF: ts.StructF{X: "NotEqual"}, X: "NotEqual"}, y: ts.StructF1{StructF: ts.StructF{X: "not_equal"}, X: "not_equal"}, wantEqual: false, reason: "Equal method on StructF1 value not called", }, { label: label + "/StructF1/PointerEqual", x: &ts.StructF1{StructF: ts.StructF{X: "NotEqual"}, X: "NotEqual"}, y: &ts.StructF1{StructF: ts.StructF{X: "not_equal"}, X: "not_equal"}, wantEqual: true, reason: "Equal method on StructF1 pointer called", }, { label: label + "/StructA2/ValueEqual", x: ts.StructA2{StructA: &ts.StructA{X: "NotEqual"}, X: "equal"}, y: ts.StructA2{StructA: &ts.StructA{X: "not_equal"}, X: "equal"}, wantEqual: true, reason: "Equal method on StructA pointer called with equal X field", }, { label: label + "/StructA2/ValueInequal", x: ts.StructA2{StructA: &ts.StructA{X: "NotEqual"}, X: "NotEqual"}, y: ts.StructA2{StructA: &ts.StructA{X: "not_equal"}, X: "not_equal"}, wantEqual: false, reason: "Equal method on StructA pointer called, but inequal X field", }, { label: label + "/StructA2/PointerEqual", x: &ts.StructA2{StructA: &ts.StructA{X: "NotEqual"}, X: "equal"}, y: &ts.StructA2{StructA: &ts.StructA{X: "not_equal"}, X: "equal"}, wantEqual: true, reason: "Equal method on StructA pointer called with equal X field", }, { label: label + "/StructA2/PointerInequal", x: &ts.StructA2{StructA: &ts.StructA{X: "NotEqual"}, X: "NotEqual"}, y: &ts.StructA2{StructA: &ts.StructA{X: "not_equal"}, X: "not_equal"}, wantEqual: false, reason: "Equal method on StructA pointer called, but inequal X field", }, { 
label: label + "/StructB2/ValueEqual", x: ts.StructB2{StructB: &ts.StructB{X: "NotEqual"}, X: "equal"}, y: ts.StructB2{StructB: &ts.StructB{X: "not_equal"}, X: "equal"}, wantEqual: true, reason: "Equal method on StructB pointer called with equal X field", }, { label: label + "/StructB2/ValueInequal", x: ts.StructB2{StructB: &ts.StructB{X: "NotEqual"}, X: "NotEqual"}, y: ts.StructB2{StructB: &ts.StructB{X: "not_equal"}, X: "not_equal"}, wantEqual: false, reason: "Equal method on StructB pointer called, but inequal X field", }, { label: label + "/StructB2/PointerEqual", x: &ts.StructB2{StructB: &ts.StructB{X: "NotEqual"}, X: "equal"}, y: &ts.StructB2{StructB: &ts.StructB{X: "not_equal"}, X: "equal"}, wantEqual: true, reason: "Equal method on StructB pointer called with equal X field", }, { label: label + "/StructB2/PointerInequal", x: &ts.StructB2{StructB: &ts.StructB{X: "NotEqual"}, X: "NotEqual"}, y: &ts.StructB2{StructB: &ts.StructB{X: "not_equal"}, X: "not_equal"}, wantEqual: false, reason: "Equal method on StructB pointer called, but inequal X field", }, { label: label + "/StructC2/ValueEqual", x: ts.StructC2{StructC: &ts.StructC{X: "NotEqual"}, X: "NotEqual"}, y: ts.StructC2{StructC: &ts.StructC{X: "not_equal"}, X: "not_equal"}, wantEqual: true, reason: "Equal method called on StructC2 value due to forwarded StructC pointer", }, { label: label + "/StructC2/PointerEqual", x: &ts.StructC2{StructC: &ts.StructC{X: "NotEqual"}, X: "NotEqual"}, y: &ts.StructC2{StructC: &ts.StructC{X: "not_equal"}, X: "not_equal"}, wantEqual: true, reason: "Equal method called on StructC2 pointer due to forwarded StructC pointer", }, { label: label + "/StructD2/ValueEqual", x: ts.StructD2{StructD: &ts.StructD{X: "NotEqual"}, X: "NotEqual"}, y: ts.StructD2{StructD: &ts.StructD{X: "not_equal"}, X: "not_equal"}, wantEqual: true, reason: "Equal method called on StructD2 value due to forwarded StructD pointer", }, { label: label + "/StructD2/PointerEqual", x: &ts.StructD2{StructD: &ts.StructD{X: "NotEqual"}, X: "NotEqual"}, y: &ts.StructD2{StructD: &ts.StructD{X: "not_equal"}, X: "not_equal"}, wantEqual: true, reason: "Equal method called on StructD2 pointer due to forwarded StructD pointer", }, { label: label + "/StructE2/ValueEqual", x: ts.StructE2{StructE: &ts.StructE{X: "NotEqual"}, X: "NotEqual"}, y: ts.StructE2{StructE: &ts.StructE{X: "not_equal"}, X: "not_equal"}, wantEqual: true, reason: "Equal method called on StructE2 value due to forwarded StructE pointer", }, { label: label + "/StructE2/PointerEqual", x: &ts.StructE2{StructE: &ts.StructE{X: "NotEqual"}, X: "NotEqual"}, y: &ts.StructE2{StructE: &ts.StructE{X: "not_equal"}, X: "not_equal"}, wantEqual: true, reason: "Equal method called on StructE2 pointer due to forwarded StructE pointer", }, { label: label + "/StructF2/ValueEqual", x: ts.StructF2{StructF: &ts.StructF{X: "NotEqual"}, X: "NotEqual"}, y: ts.StructF2{StructF: &ts.StructF{X: "not_equal"}, X: "not_equal"}, wantEqual: true, reason: "Equal method called on StructF2 value due to forwarded StructF pointer", }, { label: label + "/StructF2/PointerEqual", x: &ts.StructF2{StructF: &ts.StructF{X: "NotEqual"}, X: "NotEqual"}, y: &ts.StructF2{StructF: &ts.StructF{X: "not_equal"}, X: "not_equal"}, wantEqual: true, reason: "Equal method called on StructF2 pointer due to forwarded StructF pointer", }, { label: label + "/StructNo/Inequal", x: ts.StructNo{X: "NotEqual"}, y: ts.StructNo{X: "not_equal"}, wantEqual: false, reason: "Equal method not called since StructNo is not assignable to InterfaceA", }, { 
label: label + "/AssignA/Equal", x: ts.AssignA(func() int { return 0 }), y: ts.AssignA(func() int { return 1 }), wantEqual: true, reason: "Equal method called since named func is assignable to unnamed func", }, { label: label + "/AssignB/Equal", x: ts.AssignB(struct{ A int }{0}), y: ts.AssignB(struct{ A int }{1}), wantEqual: true, reason: "Equal method called since named struct is assignable to unnamed struct", }, { label: label + "/AssignC/Equal", x: ts.AssignC(make(chan bool)), y: ts.AssignC(make(chan bool)), wantEqual: true, reason: "Equal method called since named channel is assignable to unnamed channel", }, { label: label + "/AssignD/Equal", x: ts.AssignD(make(chan bool)), y: ts.AssignD(make(chan bool)), wantEqual: true, reason: "Equal method called since named channel is assignable to unnamed channel", }} } type ( CycleAlpha struct { Name string Bravos map[string]*CycleBravo } CycleBravo struct { ID int Name string Mods int Alphas map[string]*CycleAlpha } ) func cycleTests() []test { const label = "Cycle" type ( P *P S []S M map[int]M ) makeGraph := func() map[string]*CycleAlpha { v := map[string]*CycleAlpha{ "Foo": &CycleAlpha{ Name: "Foo", Bravos: map[string]*CycleBravo{ "FooBravo": &CycleBravo{ Name: "FooBravo", ID: 101, Mods: 100, Alphas: map[string]*CycleAlpha{ "Foo": nil, // cyclic reference }, }, }, }, "Bar": &CycleAlpha{ Name: "Bar", Bravos: map[string]*CycleBravo{ "BarBuzzBravo": &CycleBravo{ Name: "BarBuzzBravo", ID: 102, Mods: 2, Alphas: map[string]*CycleAlpha{ "Bar": nil, // cyclic reference "Buzz": nil, // cyclic reference }, }, "BuzzBarBravo": &CycleBravo{ Name: "BuzzBarBravo", ID: 103, Mods: 0, Alphas: map[string]*CycleAlpha{ "Bar": nil, // cyclic reference "Buzz": nil, // cyclic reference }, }, }, }, "Buzz": &CycleAlpha{ Name: "Buzz", Bravos: map[string]*CycleBravo{ "BarBuzzBravo": nil, // cyclic reference "BuzzBarBravo": nil, // cyclic reference }, }, } v["Foo"].Bravos["FooBravo"].Alphas["Foo"] = v["Foo"] v["Bar"].Bravos["BarBuzzBravo"].Alphas["Bar"] = v["Bar"] v["Bar"].Bravos["BarBuzzBravo"].Alphas["Buzz"] = v["Buzz"] v["Bar"].Bravos["BuzzBarBravo"].Alphas["Bar"] = v["Bar"] v["Bar"].Bravos["BuzzBarBravo"].Alphas["Buzz"] = v["Buzz"] v["Buzz"].Bravos["BarBuzzBravo"] = v["Bar"].Bravos["BarBuzzBravo"] v["Buzz"].Bravos["BuzzBarBravo"] = v["Bar"].Bravos["BuzzBarBravo"] return v } var tests []test type XY struct{ x, y interface{} } for _, tt := range []struct { label string in XY wantEqual bool reason string }{{ label: "PointersEqual", in: func() XY { x := new(P) *x = x y := new(P) *y = y return XY{x, y} }(), wantEqual: true, reason: "equal pair of single-node pointers", }, { label: "PointersInequal", in: func() XY { x := new(P) *x = x y1, y2 := new(P), new(P) *y1 = y2 *y2 = y1 return XY{x, y1} }(), wantEqual: false, reason: "inequal pair of single-node and double-node pointers", }, { label: "SlicesEqual", in: func() XY { x := S{nil} x[0] = x y := S{nil} y[0] = y return XY{x, y} }(), wantEqual: true, reason: "equal pair of single-node slices", }, { label: "SlicesInequal", in: func() XY { x := S{nil} x[0] = x y1, y2 := S{nil}, S{nil} y1[0] = y2 y2[0] = y1 return XY{x, y1} }(), wantEqual: false, reason: "inequal pair of single-node and double node slices", }, { label: "MapsEqual", in: func() XY { x := M{0: nil} x[0] = x y := M{0: nil} y[0] = y return XY{x, y} }(), wantEqual: true, reason: "equal pair of single-node maps", }, { label: "MapsInequal", in: func() XY { x := M{0: nil} x[0] = x y1, y2 := M{0: nil}, M{0: nil} y1[0] = y2 y2[0] = y1 return XY{x, y1} }(), wantEqual: 
false, reason: "inequal pair of single-node and double-node maps", }, { label: "GraphEqual", in: XY{makeGraph(), makeGraph()}, wantEqual: true, reason: "graphs are equal since they have identical forms", }, { label: "GraphInequalZeroed", in: func() XY { x := makeGraph() y := makeGraph() y["Foo"].Bravos["FooBravo"].ID = 0 y["Bar"].Bravos["BarBuzzBravo"].ID = 0 y["Bar"].Bravos["BuzzBarBravo"].ID = 0 return XY{x, y} }(), wantEqual: false, reason: "graphs are inequal because the ID fields are different", }, { label: "GraphInequalStruct", in: func() XY { x := makeGraph() y := makeGraph() x["Buzz"].Bravos["BuzzBarBravo"] = &CycleBravo{ Name: "BuzzBarBravo", ID: 103, } return XY{x, y} }(), wantEqual: false, reason: "graphs are inequal because they differ on a map element", }} { tests = append(tests, test{ label: label + "/" + tt.label, x: tt.in.x, y: tt.in.y, wantEqual: tt.wantEqual, reason: tt.reason, }) } return tests } func project1Tests() []test { const label = "Project1" ignoreUnexported := cmpopts.IgnoreUnexported( ts.EagleImmutable{}, ts.DreamerImmutable{}, ts.SlapImmutable{}, ts.GoatImmutable{}, ts.DonkeyImmutable{}, ts.LoveRadius{}, ts.SummerLove{}, ts.SummerLoveSummary{}, ) createEagle := func() ts.Eagle { return ts.Eagle{ Name: "eagle", Hounds: []string{"buford", "tannen"}, Desc: "some description", Dreamers: []ts.Dreamer{{}, { Name: "dreamer2", Animal: []interface{}{ ts.Goat{ Target: "corporation", Immutable: &ts.GoatImmutable{ ID: "southbay", State: (*pb.Goat_States)(newInt(5)), Started: now, }, }, ts.Donkey{}, }, Amoeba: 53, }}, Slaps: []ts.Slap{{ Name: "slapID", Args: &pb.MetaData{Stringer: pb.Stringer{X: "metadata"}}, Immutable: &ts.SlapImmutable{ ID: "immutableSlap", MildSlap: true, Started: now, LoveRadius: &ts.LoveRadius{ Summer: &ts.SummerLove{ Summary: &ts.SummerLoveSummary{ Devices: []string{"foo", "bar", "baz"}, ChangeType: []pb.SummerType{1, 2, 3}, }, }, }, }, }}, Immutable: &ts.EagleImmutable{ ID: "eagleID", Birthday: now, MissingCall: (*pb.Eagle_MissingCalls)(newInt(55)), }, } } return []test{{ label: label + "/PanicUnexported", x: ts.Eagle{Slaps: []ts.Slap{{ Args: &pb.MetaData{Stringer: pb.Stringer{X: "metadata"}}, }}}, y: ts.Eagle{Slaps: []ts.Slap{{ Args: &pb.MetaData{Stringer: pb.Stringer{X: "metadata"}}, }}}, wantPanic: "cannot handle unexported field", reason: "struct contains unexported fields", }, { label: label + "/ProtoEqual", x: ts.Eagle{Slaps: []ts.Slap{{ Args: &pb.MetaData{Stringer: pb.Stringer{X: "metadata"}}, }}}, y: ts.Eagle{Slaps: []ts.Slap{{ Args: &pb.MetaData{Stringer: pb.Stringer{X: "metadata"}}, }}}, opts: []cmp.Option{cmp.Comparer(pb.Equal)}, wantEqual: true, reason: "simulated protobuf messages contain the same values", }, { label: label + "/ProtoInequal", x: ts.Eagle{Slaps: []ts.Slap{{}, {}, {}, {}, { Args: &pb.MetaData{Stringer: pb.Stringer{X: "metadata"}}, }}}, y: ts.Eagle{Slaps: []ts.Slap{{}, {}, {}, {}, { Args: &pb.MetaData{Stringer: pb.Stringer{X: "metadata2"}}, }}}, opts: []cmp.Option{cmp.Comparer(pb.Equal)}, wantEqual: false, reason: "simulated protobuf messages contain different values", }, { label: label + "/Equal", x: createEagle(), y: createEagle(), opts: []cmp.Option{ignoreUnexported, cmp.Comparer(pb.Equal)}, wantEqual: true, reason: "equal because values are the same", }, { label: label + "/Inequal", x: func() ts.Eagle { eg := createEagle() eg.Dreamers[1].Animal[0].(ts.Goat).Immutable.ID = "southbay2" eg.Dreamers[1].Animal[0].(ts.Goat).Immutable.State = (*pb.Goat_States)(newInt(6)) eg.Slaps[0].Immutable.MildSlap = false return eg }(), 
y: func() ts.Eagle { eg := createEagle() devs := eg.Slaps[0].Immutable.LoveRadius.Summer.Summary.Devices eg.Slaps[0].Immutable.LoveRadius.Summer.Summary.Devices = devs[:1] return eg }(), opts: []cmp.Option{ignoreUnexported, cmp.Comparer(pb.Equal)}, wantEqual: false, reason: "inequal because some values are different", }} } type germSorter []*pb.Germ func (gs germSorter) Len() int { return len(gs) } func (gs germSorter) Less(i, j int) bool { return gs[i].String() < gs[j].String() } func (gs germSorter) Swap(i, j int) { gs[i], gs[j] = gs[j], gs[i] } func project2Tests() []test { const label = "Project2" sortGerms := cmp.Transformer("Sort", func(in []*pb.Germ) []*pb.Germ { out := append([]*pb.Germ(nil), in...) // Make copy sort.Sort(germSorter(out)) return out }) equalDish := cmp.Comparer(func(x, y *ts.Dish) bool { if x == nil || y == nil { return x == nil && y == nil } px, err1 := x.Proto() py, err2 := y.Proto() if err1 != nil || err2 != nil { return err1 == err2 } return pb.Equal(px, py) }) createBatch := func() ts.GermBatch { return ts.GermBatch{ DirtyGerms: map[int32][]*pb.Germ{ 17: { {Stringer: pb.Stringer{X: "germ1"}}, }, 18: { {Stringer: pb.Stringer{X: "germ2"}}, {Stringer: pb.Stringer{X: "germ3"}}, {Stringer: pb.Stringer{X: "germ4"}}, }, }, GermMap: map[int32]*pb.Germ{ 13: {Stringer: pb.Stringer{X: "germ13"}}, 21: {Stringer: pb.Stringer{X: "germ21"}}, }, DishMap: map[int32]*ts.Dish{ 0: ts.CreateDish(nil, io.EOF), 1: ts.CreateDish(nil, io.ErrUnexpectedEOF), 2: ts.CreateDish(&pb.Dish{Stringer: pb.Stringer{X: "dish"}}, nil), }, HasPreviousResult: true, DirtyID: 10, GermStrain: 421, InfectedAt: now, } } return []test{{ label: label + "/PanicUnexported", x: createBatch(), y: createBatch(), wantPanic: "cannot handle unexported field", reason: "struct contains unexported fields", }, { label: label + "/Equal", x: createBatch(), y: createBatch(), opts: []cmp.Option{cmp.Comparer(pb.Equal), sortGerms, equalDish}, wantEqual: true, reason: "equal because identical values are compared", }, { label: label + "/InequalOrder", x: createBatch(), y: func() ts.GermBatch { gb := createBatch() s := gb.DirtyGerms[18] s[0], s[1], s[2] = s[1], s[2], s[0] return gb }(), opts: []cmp.Option{cmp.Comparer(pb.Equal), equalDish}, wantEqual: false, reason: "inequal because slice contains elements in differing order", }, { label: label + "/EqualOrder", x: createBatch(), y: func() ts.GermBatch { gb := createBatch() s := gb.DirtyGerms[18] s[0], s[1], s[2] = s[1], s[2], s[0] return gb }(), opts: []cmp.Option{cmp.Comparer(pb.Equal), sortGerms, equalDish}, wantEqual: true, reason: "equal because unordered slice is sorted using transformer", }, { label: label + "/Inequal", x: func() ts.GermBatch { gb := createBatch() delete(gb.DirtyGerms, 17) gb.DishMap[1] = nil return gb }(), y: func() ts.GermBatch { gb := createBatch() gb.DirtyGerms[18] = gb.DirtyGerms[18][:2] gb.GermStrain = 22 return gb }(), opts: []cmp.Option{cmp.Comparer(pb.Equal), sortGerms, equalDish}, wantEqual: false, reason: "inequal because some values are different", }} } func project3Tests() []test { const label = "Project3" allowVisibility := cmp.AllowUnexported(ts.Dirt{}) ignoreLocker := cmpopts.IgnoreInterfaces(struct{ sync.Locker }{}) transformProtos := cmp.Transformer("λ", func(x pb.Dirt) *pb.Dirt { return &x }) equalTable := cmp.Comparer(func(x, y ts.Table) bool { tx, ok1 := x.(*ts.MockTable) ty, ok2 := y.(*ts.MockTable) if !ok1 || !ok2 { panic("table type must be MockTable") } return cmp.Equal(tx.State(), ty.State()) }) createDirt := func() (d ts.Dirt) { 
d.SetTable(ts.CreateMockTable([]string{"a", "b", "c"})) d.SetTimestamp(12345) d.Discord = 554 d.Proto = pb.Dirt{Stringer: pb.Stringer{X: "proto"}} d.SetWizard(map[string]*pb.Wizard{ "harry": {Stringer: pb.Stringer{X: "potter"}}, "albus": {Stringer: pb.Stringer{X: "dumbledore"}}, }) d.SetLastTime(54321) return d } return []test{{ label: label + "/PanicUnexported1", x: createDirt(), y: createDirt(), wantPanic: "cannot handle unexported field", reason: "struct contains unexported fields", }, { label: label + "/PanicUnexported2", x: createDirt(), y: createDirt(), opts: []cmp.Option{allowVisibility, ignoreLocker, cmp.Comparer(pb.Equal), equalTable}, wantPanic: "cannot handle unexported field", reason: "struct contains references to simulated protobuf types with unexported fields", }, { label: label + "/Equal", x: createDirt(), y: createDirt(), opts: []cmp.Option{allowVisibility, transformProtos, ignoreLocker, cmp.Comparer(pb.Equal), equalTable}, wantEqual: true, reason: "transformer used to create reference to protobuf message so it works with pb.Equal", }, { label: label + "/Inequal", x: func() ts.Dirt { d := createDirt() d.SetTable(ts.CreateMockTable([]string{"a", "c"})) d.Proto = pb.Dirt{Stringer: pb.Stringer{X: "blah"}} return d }(), y: func() ts.Dirt { d := createDirt() d.Discord = 500 d.SetWizard(map[string]*pb.Wizard{ "harry": {Stringer: pb.Stringer{X: "otter"}}, }) return d }(), opts: []cmp.Option{allowVisibility, transformProtos, ignoreLocker, cmp.Comparer(pb.Equal), equalTable}, wantEqual: false, reason: "inequal because some values are different", }} } func project4Tests() []test { const label = "Project4" allowVisibility := cmp.AllowUnexported( ts.Cartel{}, ts.Headquarter{}, ts.Poison{}, ) transformProtos := cmp.Transformer("λ", func(x pb.Restrictions) *pb.Restrictions { return &x }) createCartel := func() ts.Cartel { var p ts.Poison p.SetPoisonType(5) p.SetExpiration(now) p.SetManufacturer("acme") var hq ts.Headquarter hq.SetID(5) hq.SetLocation("moon") hq.SetSubDivisions([]string{"alpha", "bravo", "charlie"}) hq.SetMetaData(&pb.MetaData{Stringer: pb.Stringer{X: "metadata"}}) hq.SetPublicMessage([]byte{1, 2, 3, 4, 5}) hq.SetHorseBack("abcdef") hq.SetStatus(44) var c ts.Cartel c.Headquarter = hq c.SetSource("mars") c.SetCreationTime(now) c.SetBoss("al capone") c.SetPoisons([]*ts.Poison{&p}) return c } return []test{{ label: label + "/PanicUnexported1", x: createCartel(), y: createCartel(), wantPanic: "cannot handle unexported field", reason: "struct contains unexported fields", }, { label: label + "/PanicUnexported2", x: createCartel(), y: createCartel(), opts: []cmp.Option{allowVisibility, cmp.Comparer(pb.Equal)}, wantPanic: "cannot handle unexported field", reason: "struct contains references to simulated protobuf types with unexported fields", }, { label: label + "/Equal", x: createCartel(), y: createCartel(), opts: []cmp.Option{allowVisibility, transformProtos, cmp.Comparer(pb.Equal)}, wantEqual: true, reason: "transformer used to create reference to protobuf message so it works with pb.Equal", }, { label: label + "/Inequal", x: func() ts.Cartel { d := createCartel() var p1, p2 ts.Poison p1.SetPoisonType(1) p1.SetExpiration(now) p1.SetManufacturer("acme") p2.SetPoisonType(2) p2.SetManufacturer("acme2") d.SetPoisons([]*ts.Poison{&p1, &p2}) return d }(), y: func() ts.Cartel { d := createCartel() d.SetSubDivisions([]string{"bravo", "charlie"}) d.SetPublicMessage([]byte{1, 2, 4, 3, 5}) return d }(), opts: []cmp.Option{allowVisibility, transformProtos, cmp.Comparer(pb.Equal)}, 
wantEqual: false, reason: "inequal because some values are different", }} } // BenchmarkBytes benchmarks the performance of performing Equal or Diff on // large slices of bytes. func BenchmarkBytes(b *testing.B) { // Create a list of PathFilters that never apply, but are evaluated. const maxFilters = 5 var filters cmp.Options errorIface := reflect.TypeOf((*error)(nil)).Elem() for i := 0; i <= maxFilters; i++ { filters = append(filters, cmp.FilterPath(func(p cmp.Path) bool { return p.Last().Type().AssignableTo(errorIface) // Never true }, cmp.Ignore())) } type benchSize struct { label string size int64 } for _, ts := range []benchSize{ {"4KiB", 1 << 12}, {"64KiB", 1 << 16}, {"1MiB", 1 << 20}, {"16MiB", 1 << 24}, } { bx := append(append(make([]byte, ts.size/2), 'x'), make([]byte, ts.size/2)...) by := append(append(make([]byte, ts.size/2), 'y'), make([]byte, ts.size/2)...) b.Run(ts.label, func(b *testing.B) { // Iteratively add more filters that never apply, but are evaluated // to measure the cost of simply evaluating each filter. for i := 0; i <= maxFilters; i++ { b.Run(fmt.Sprintf("EqualFilter%d", i), func(b *testing.B) { b.ReportAllocs() b.SetBytes(2 * ts.size) for j := 0; j < b.N; j++ { cmp.Equal(bx, by, filters[:i]...) } }) } for i := 0; i <= maxFilters; i++ { b.Run(fmt.Sprintf("DiffFilter%d", i), func(b *testing.B) { b.ReportAllocs() b.SetBytes(2 * ts.size) for j := 0; j < b.N; j++ { cmp.Diff(bx, by, filters[:i]...) } }) } }) } } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/internal/0000755000000000000000000000000015024302467023506 5ustar rootrootdependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/internal/diff/0000755000000000000000000000000015024302467024416 5ustar rootrootdependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/internal/diff/debug_disable.go0000644000000000000000000000062615024302467027522 0ustar rootroot// Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build !cmp_debug // +build !cmp_debug package diff var debug debugger type debugger struct{} func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc { return f } func (debugger) Update() {} func (debugger) Finish() {} dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/internal/diff/diff.go0000644000000000000000000003155015024302467025661 0ustar rootroot// Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package diff implements an algorithm for producing edit-scripts. // The edit-script is a sequence of operations needed to transform one list // of symbols into another (or vice-versa). The edits allowed are insertions, // deletions, and modifications. The summation of all edits is called the // Levenshtein distance as this problem is well-known in computer science. // // This package prioritizes performance over accuracy. That is, the run time // is more important than obtaining a minimal Levenshtein distance. package diff import ( "math/rand" "time" "github.com/google/go-cmp/cmp/internal/flags" ) // EditType represents a single operation within an edit-script. type EditType uint8 const ( // Identity indicates that a symbol pair is identical in both list X and Y. Identity EditType = iota // UniqueX indicates that a symbol only exists in X and not Y. UniqueX // UniqueY indicates that a symbol only exists in Y and not X. 
UniqueY // Modified indicates that a symbol pair is a modification of each other. Modified ) // EditScript represents the series of differences between two lists. type EditScript []EditType // String returns a human-readable string representing the edit-script where // Identity, UniqueX, UniqueY, and Modified are represented by the // '.', 'X', 'Y', and 'M' characters, respectively. func (es EditScript) String() string { b := make([]byte, len(es)) for i, e := range es { switch e { case Identity: b[i] = '.' case UniqueX: b[i] = 'X' case UniqueY: b[i] = 'Y' case Modified: b[i] = 'M' default: panic("invalid edit-type") } } return string(b) } // stats returns a histogram of the number of each type of edit operation. func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) { for _, e := range es { switch e { case Identity: s.NI++ case UniqueX: s.NX++ case UniqueY: s.NY++ case Modified: s.NM++ default: panic("invalid edit-type") } } return } // Dist is the Levenshtein distance and is guaranteed to be 0 if and only if // lists X and Y are equal. func (es EditScript) Dist() int { return len(es) - es.stats().NI } // LenX is the length of the X list. func (es EditScript) LenX() int { return len(es) - es.stats().NY } // LenY is the length of the Y list. func (es EditScript) LenY() int { return len(es) - es.stats().NX } // EqualFunc reports whether the symbols at indexes ix and iy are equal. // When called by Difference, the index is guaranteed to be within nx and ny. type EqualFunc func(ix int, iy int) Result // Result is the result of comparison. // NumSame is the number of sub-elements that are equal. // NumDiff is the number of sub-elements that are not equal. type Result struct{ NumSame, NumDiff int } // BoolResult returns a Result that is either Equal or not Equal. func BoolResult(b bool) Result { if b { return Result{NumSame: 1} // Equal, Similar } else { return Result{NumDiff: 2} // Not Equal, not Similar } } // Equal indicates whether the symbols are equal. Two symbols are equal // if and only if NumDiff == 0. If Equal, then they are also Similar. func (r Result) Equal() bool { return r.NumDiff == 0 } // Similar indicates whether two symbols are similar and may be represented // by using the Modified type. As a special case, we consider binary comparisons // (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar. // // The exact ratio of NumSame to NumDiff to determine similarity may change. func (r Result) Similar() bool { // Use NumSame+1 to offset NumSame so that binary comparisons are similar. return r.NumSame+1 >= r.NumDiff } var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 // Difference reports whether two lists of lengths nx and ny are equal // given the definition of equality provided as f. // // This function returns an edit-script, which is a sequence of operations // needed to convert one list into the other. The following invariants for // the edit-script are maintained: // - eq == (es.Dist()==0) // - nx == es.LenX() // - ny == es.LenY() // // This algorithm is not guaranteed to be an optimal solution (i.e., one that // produces an edit-script with a minimal Levenshtein distance). This algorithm // favors performance over optimality. The exact output is not guaranteed to // be stable and may change over time. func Difference(nx, ny int, f EqualFunc) (es EditScript) { // This algorithm is based on traversing what is known as an "edit-graph". // See Figure 1 from "An O(ND) Difference Algorithm and Its Variations" // by Eugene W. Myers. 
Since D can be as large as N itself, this is // effectively O(N^2). Unlike the algorithm from that paper, we are not // interested in the optimal path, but at least some "decent" path. // // For example, let X and Y be lists of symbols: // X = [A B C A B B A] // Y = [C B A B A C] // // The edit-graph can be drawn as the following: // A B C A B B A // ┌─────────────┐ // C │_|_|\|_|_|_|_│ 0 // B │_|\|_|_|\|\|_│ 1 // A │\|_|_|\|_|_|\│ 2 // B │_|\|_|_|\|\|_│ 3 // A │\|_|_|\|_|_|\│ 4 // C │ | |\| | | | │ 5 // └─────────────┘ 6 // 0 1 2 3 4 5 6 7 // // List X is written along the horizontal axis, while list Y is written // along the vertical axis. At any point on this grid, if the symbol in // list X matches the corresponding symbol in list Y, then a '\' is drawn. // The goal of any minimal edit-script algorithm is to find a path from the // top-left corner to the bottom-right corner, while traveling through the // fewest horizontal or vertical edges. // A horizontal edge is equivalent to inserting a symbol from list X. // A vertical edge is equivalent to inserting a symbol from list Y. // A diagonal edge is equivalent to a matching symbol between both X and Y. // Invariants: // - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx // - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny // // In general: // - fwdFrontier.X < revFrontier.X // - fwdFrontier.Y < revFrontier.Y // // Unless it is time for the algorithm to terminate. fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)} revPath := path{-1, point{nx, ny}, make(EditScript, 0)} fwdFrontier := fwdPath.point // Forward search frontier revFrontier := revPath.point // Reverse search frontier // Search budget bounds the cost of searching for better paths. // The longest sequence of non-matching symbols that can be tolerated is // approximately the square-root of the search budget. searchBudget := 4 * (nx + ny) // O(n) // Running the tests with the "cmp_debug" build tag prints a visualization // of the algorithm running in real-time. This is educational for // understanding how the algorithm works. See debug_enable.go. f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es) // The algorithm below is a greedy, meet-in-the-middle algorithm for // computing sub-optimal edit-scripts between two lists. // // The algorithm is approximately as follows: // - Searching for differences switches back-and-forth between // a search that starts at the beginning (the top-left corner), and // a search that starts at the end (the bottom-right corner). // The goal of the search is to connect with the search // from the opposite corner. // - As we search, we build a path in a greedy manner, // where the first match seen is added to the path (this is sub-optimal, // but provides a decent result in practice). When matches are found, // we try the next pair of symbols in the lists and follow all matches // as far as possible. // - When searching for matches, we search along a diagonal going // through the "frontier" point. If no matches are found, // we advance the frontier towards the opposite corner. // - This algorithm terminates when either the X coordinates or the // Y coordinates of the forward and reverse frontier points ever intersect. // This algorithm is correct even if searching only in the forward direction // or in the reverse direction. We do both because it is commonly observed // that two lists commonly differ because elements were added to the front // or end of the other list.
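// An illustrative usage sketch (a minimal example, assuming the caller has two
// byte strings x and y; it is not taken from the surrounding source):
//
//	es := Difference(len(x), len(y), func(ix, iy int) Result {
//		return BoolResult(x[ix] == y[iy])
//	})
//	// es.Dist() reports the (possibly non-minimal) edit distance, and
//	// es.String() renders the script, e.g. ".X..Y.".
//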
// // Non-deterministically start with either the forward or reverse direction // to introduce some deliberate instability so that we have the flexibility // to change this algorithm in the future. if flags.Deterministic || randBool { goto forwardSearch } else { goto reverseSearch } forwardSearch: { // Forward search from the beginning. if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { goto finishSearch } for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { // Search in a diagonal pattern for a match. z := zigzag(i) p := point{fwdFrontier.X + z, fwdFrontier.Y - z} switch { case p.X >= revPath.X || p.Y < fwdPath.Y: stop1 = true // Hit top-right corner case p.Y >= revPath.Y || p.X < fwdPath.X: stop2 = true // Hit bottom-left corner case f(p.X, p.Y).Equal(): // Match found, so connect the path to this point. fwdPath.connect(p, f) fwdPath.append(Identity) // Follow sequence of matches as far as possible. for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { if !f(fwdPath.X, fwdPath.Y).Equal() { break } fwdPath.append(Identity) } fwdFrontier = fwdPath.point stop1, stop2 = true, true default: searchBudget-- // Match not found } debug.Update() } // Advance the frontier towards reverse point. if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y { fwdFrontier.X++ } else { fwdFrontier.Y++ } goto reverseSearch } reverseSearch: { // Reverse search from the end. if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { goto finishSearch } for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { // Search in a diagonal pattern for a match. z := zigzag(i) p := point{revFrontier.X - z, revFrontier.Y + z} switch { case fwdPath.X >= p.X || revPath.Y < p.Y: stop1 = true // Hit bottom-left corner case fwdPath.Y >= p.Y || revPath.X < p.X: stop2 = true // Hit top-right corner case f(p.X-1, p.Y-1).Equal(): // Match found, so connect the path to this point. revPath.connect(p, f) revPath.append(Identity) // Follow sequence of matches as far as possible. for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { if !f(revPath.X-1, revPath.Y-1).Equal() { break } revPath.append(Identity) } revFrontier = revPath.point stop1, stop2 = true, true default: searchBudget-- // Match not found } debug.Update() } // Advance the frontier towards forward point. if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y { revFrontier.X-- } else { revFrontier.Y-- } goto forwardSearch } finishSearch: // Join the forward and reverse paths and then append the reverse path. fwdPath.connect(revPath.point, f) for i := len(revPath.es) - 1; i >= 0; i-- { t := revPath.es[i] revPath.es = revPath.es[:i] fwdPath.append(t) } debug.Finish() return fwdPath.es } type path struct { dir int // +1 if forward, -1 if reverse point // Leading point of the EditScript path es EditScript } // connect appends any necessary Identity, Modified, UniqueX, or UniqueY types // to the edit-script to connect p.point to dst. func (p *path) connect(dst point, f EqualFunc) { if p.dir > 0 { // Connect in forward direction. for dst.X > p.X && dst.Y > p.Y { switch r := f(p.X, p.Y); { case r.Equal(): p.append(Identity) case r.Similar(): p.append(Modified) case dst.X-p.X >= dst.Y-p.Y: p.append(UniqueX) default: p.append(UniqueY) } } for dst.X > p.X { p.append(UniqueX) } for dst.Y > p.Y { p.append(UniqueY) } } else { // Connect in reverse direction. 
for p.X > dst.X && p.Y > dst.Y { switch r := f(p.X-1, p.Y-1); { case r.Equal(): p.append(Identity) case r.Similar(): p.append(Modified) case p.Y-dst.Y >= p.X-dst.X: p.append(UniqueY) default: p.append(UniqueX) } } for p.X > dst.X { p.append(UniqueX) } for p.Y > dst.Y { p.append(UniqueY) } } } func (p *path) append(t EditType) { p.es = append(p.es, t) switch t { case Identity, Modified: p.add(p.dir, p.dir) case UniqueX: p.add(p.dir, 0) case UniqueY: p.add(0, p.dir) } debug.Update() } type point struct{ X, Y int } func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy } // zigzag maps a consecutive sequence of integers to a zig-zag sequence. // // [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...] func zigzag(x int) int { if x&1 != 0 { x = ^x } return x >> 1 } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/internal/diff/diff_test.go0000644000000000000000000002627015024302467026723 0ustar rootroot// Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package diff import ( "fmt" "math/rand" "strings" "testing" "unicode" ) func TestDifference(t *testing.T) { tests := []struct { // Before passing x and y to Difference, we strip all spaces so that // they can be used by the test author to indicate a missing symbol // in one of the lists. x, y string want string // '|' separated list of possible outputs }{{ x: "", y: "", want: "", }, { x: "#", y: "#", want: ".", }, { x: "##", y: "# ", want: ".X|X.", }, { x: "a#", y: "A ", want: "MX", }, { x: "#a", y: " A", want: "XM", }, { x: "# ", y: "##", want: ".Y|Y.", }, { x: " #", y: "@#", want: "Y.", }, { x: "@#", y: " #", want: "X.", }, { x: "##########0123456789", y: " 0123456789", want: "XXXXXXXXXX..........", }, { x: " 0123456789", y: "##########0123456789", want: "YYYYYYYYYY..........", }, { x: "#####0123456789#####", y: " 0123456789 ", want: "XXXXX..........XXXXX", }, { x: " 0123456789 ", y: "#####0123456789#####", want: "YYYYY..........YYYYY", }, { x: "01234##########56789", y: "01234 56789", want: ".....XXXXXXXXXX.....", }, { x: "01234 56789", y: "01234##########56789", want: ".....YYYYYYYYYY.....", }, { x: "0123456789##########", y: "0123456789 ", want: "..........XXXXXXXXXX", }, { x: "0123456789 ", y: "0123456789##########", want: "..........YYYYYYYYYY", }, { x: "abcdefghij0123456789", y: "ABCDEFGHIJ0123456789", want: "MMMMMMMMMM..........", }, { x: "ABCDEFGHIJ0123456789", y: "abcdefghij0123456789", want: "MMMMMMMMMM..........", }, { x: "01234abcdefghij56789", y: "01234ABCDEFGHIJ56789", want: ".....MMMMMMMMMM.....", }, { x: "01234ABCDEFGHIJ56789", y: "01234abcdefghij56789", want: ".....MMMMMMMMMM.....", }, { x: "0123456789abcdefghij", y: "0123456789ABCDEFGHIJ", want: "..........MMMMMMMMMM", }, { x: "0123456789ABCDEFGHIJ", y: "0123456789abcdefghij", want: "..........MMMMMMMMMM", }, { x: "ABCDEFGHIJ0123456789 ", y: " 0123456789abcdefghij", want: "XXXXXXXXXX..........YYYYYYYYYY", }, { x: " 0123456789abcdefghij", y: "ABCDEFGHIJ0123456789 ", want: "YYYYYYYYYY..........XXXXXXXXXX", }, { x: "ABCDE0123456789 FGHIJ", y: " 0123456789abcdefghij", want: "XXXXX..........YYYYYMMMMM", }, { x: " 0123456789abcdefghij", y: "ABCDE0123456789 FGHIJ", want: "YYYYY..........XXXXXMMMMM", }, { x: "ABCDE01234F G H I J 56789 ", y: " 01234 a b c d e56789fghij", want: "XXXXX.....XYXYXYXYXY.....YYYYY", }, { x: " 01234a b c d e 56789fghij", y: "ABCDE01234 F G H I J56789 ", want: "YYYYY.....XYXYXYXYXY.....XXXXX", }, { x: "FGHIJ01234ABCDE56789 ", y: " 01234abcde56789fghij", 
want: "XXXXX.....MMMMM.....YYYYY", }, { x: " 01234abcde56789fghij", y: "FGHIJ01234ABCDE56789 ", want: "YYYYY.....MMMMM.....XXXXX", }, { x: "ABCAB BA ", y: " C BABAC", want: "XX.X.Y..Y|XX.Y.X..Y", }, { x: "# #### ###", y: "#y####yy###", want: ".Y....YY...", }, { x: "# #### # ##x#x", y: "#y####y y## # ", want: ".Y....YXY..X.X", }, { x: "###z#z###### x #", y: "#y##Z#Z###### yy#", want: ".Y..M.M......XYY.", }, { x: "0 12z3x 456789 x x 0", y: "0y12Z3 y456789y y y0", want: ".Y..M.XY......YXYXY.|.Y..M.XY......XYXYY.", }, { x: "0 2 4 6 8 ..................abXXcdEXF.ghXi", y: " 1 3 5 7 9..................AB CDE F.GH I", want: "XYXYXYXYXY..................MMXXMM.X..MMXM", }, { x: "I HG.F EDC BA..................9 7 5 3 1 ", y: "iXhg.FXEdcXXba.................. 8 6 4 2 0", want: "MYMM..Y.MMYYMM..................XYXYXYXYXY", }, { x: "x1234", y: " 1234", want: "X....", }, { x: "x123x4", y: " 123 4", want: "X...X.", }, { x: "x1234x56", y: " 1234 ", want: "X....XXX", }, { x: "x1234xxx56", y: " 1234 56", want: "X....XXX..", }, { x: ".1234...ab", y: " 1234 AB", want: "X....XXXMM", }, { x: "x1234xxab.", y: " 1234 AB ", want: "X....XXMMX", }, { x: " 0123456789", y: "9012345678 ", want: "Y.........X", }, { x: " 0123456789", y: "8901234567 ", want: "YY........XX", }, { x: " 0123456789", y: "7890123456 ", want: "YYY.......XXX", }, { x: " 0123456789", y: "6789012345 ", want: "YYYY......XXXX", }, { x: "0123456789 ", y: " 5678901234", want: "XXXXX.....YYYYY|YYYYY.....XXXXX", }, { x: "0123456789 ", y: " 4567890123", want: "XXXX......YYYY", }, { x: "0123456789 ", y: " 3456789012", want: "XXX.......YYY", }, { x: "0123456789 ", y: " 2345678901", want: "XX........YY", }, { x: "0123456789 ", y: " 1234567890", want: "X.........Y", }, { x: "0 1 2 3 45 6 7 8 9 ", y: " 9 8 7 6 54 3 2 1 0", want: "XYXYXYXYX.YXYXYXYXY", }, { x: "0 1 2345678 9 ", y: " 6 72 5 819034", want: "XYXY.XX.XX.Y.YYY", }, { x: "F B Q M O I G T L N72X90 E 4S P 651HKRJU DA 83CVZW", y: " 5 W H XO10R9IV K ZLCTAJ8P3N SEQM4 7 2G6 UBD F ", want: "XYXYXYXY.YYYY.YXYXY.YYYYYYY.XXXXXY.YY.XYXYY.XXXXXX.Y.XYXXXXXX", }} for _, tt := range tests { t.Run("", func(t *testing.T) { x := strings.Replace(tt.x, " ", "", -1) y := strings.Replace(tt.y, " ", "", -1) es := testStrings(t, x, y) var want string got := es.String() for _, want = range strings.Split(tt.want, "|") { if got == want { return } } t.Errorf("Difference(%s, %s):\ngot %s\nwant %s", x, y, got, want) }) } } func TestDifferenceFuzz(t *testing.T) { tests := []struct{ px, py, pm float32 }{ {px: 0.0, py: 0.0, pm: 0.1}, {px: 0.0, py: 0.1, pm: 0.0}, {px: 0.1, py: 0.0, pm: 0.0}, {px: 0.0, py: 0.1, pm: 0.1}, {px: 0.1, py: 0.0, pm: 0.1}, {px: 0.2, py: 0.2, pm: 0.2}, {px: 0.3, py: 0.1, pm: 0.2}, {px: 0.1, py: 0.3, pm: 0.2}, {px: 0.2, py: 0.2, pm: 0.2}, {px: 0.3, py: 0.3, pm: 0.3}, {px: 0.1, py: 0.1, pm: 0.5}, {px: 0.4, py: 0.1, pm: 0.5}, {px: 0.3, py: 0.2, pm: 0.5}, {px: 0.2, py: 0.3, pm: 0.5}, {px: 0.1, py: 0.4, pm: 0.5}, } for i, tt := range tests { t.Run(fmt.Sprintf("P%d", i), func(t *testing.T) { // Sweep from 1B to 1KiB. 
for n := 1; n <= 1024; n <<= 1 { t.Run(fmt.Sprintf("N%d", n), func(t *testing.T) { for j := 0; j < 10; j++ { x, y := generateStrings(n, tt.px, tt.py, tt.pm, int64(j)) testStrings(t, x, y) } }) } }) } } func BenchmarkDifference(b *testing.B) { for n := 1 << 10; n <= 1<<20; n <<= 2 { b.Run(fmt.Sprintf("N%d", n), func(b *testing.B) { x, y := generateStrings(n, 0.05, 0.05, 0.10, 0) b.ReportAllocs() b.SetBytes(int64(len(x) + len(y))) for i := 0; i < b.N; i++ { Difference(len(x), len(y), func(ix, iy int) Result { return compareByte(x[ix], y[iy]) }) } }) } } func generateStrings(n int, px, py, pm float32, seed int64) (string, string) { if px+py+pm > 1.0 { panic("invalid probabilities") } py += px pm += py b := make([]byte, n) r := rand.New(rand.NewSource(seed)) r.Read(b) var x, y []byte for len(b) > 0 { switch p := r.Float32(); { case p < px: // UniqueX x = append(x, b[0]) case p < py: // UniqueY y = append(y, b[0]) case p < pm: // Modified x = append(x, 'A'+(b[0]%26)) y = append(y, 'a'+(b[0]%26)) default: // Identity x = append(x, b[0]) y = append(y, b[0]) } b = b[1:] } return string(x), string(y) } func testStrings(t *testing.T, x, y string) EditScript { es := Difference(len(x), len(y), func(ix, iy int) Result { return compareByte(x[ix], y[iy]) }) if es.LenX() != len(x) { t.Errorf("es.LenX = %d, want %d", es.LenX(), len(x)) } if es.LenY() != len(y) { t.Errorf("es.LenY = %d, want %d", es.LenY(), len(y)) } if !validateScript(x, y, es) { t.Errorf("invalid edit script: %v", es) } return es } func validateScript(x, y string, es EditScript) bool { var bx, by []byte for _, e := range es { switch e { case Identity: if !compareByte(x[len(bx)], y[len(by)]).Equal() { return false } bx = append(bx, x[len(bx)]) by = append(by, y[len(by)]) case UniqueX: bx = append(bx, x[len(bx)]) case UniqueY: by = append(by, y[len(by)]) case Modified: if !compareByte(x[len(bx)], y[len(by)]).Similar() { return false } bx = append(bx, x[len(bx)]) by = append(by, y[len(by)]) } } return string(bx) == x && string(by) == y } // compareByte returns a Result where the result is Equal if x == y, // similar if x and y differ only in casing, and different otherwise. func compareByte(x, y byte) (r Result) { switch { case x == y: return equalResult // Identity case unicode.ToUpper(rune(x)) == unicode.ToUpper(rune(y)): return similarResult // Modified default: return differentResult // UniqueX or UniqueY } } var ( equalResult = Result{NumDiff: 0} similarResult = Result{NumDiff: 1} differentResult = Result{NumDiff: 2} ) func TestResult(t *testing.T) { tests := []struct { result Result wantEqual bool wantSimilar bool }{ // equalResult is equal since NumDiff == 0, by definition of Equal method. {equalResult, true, true}, // similarResult is similar since it is a binary result where only one // element was compared (i.e., Either NumSame==1 or NumDiff==1). {similarResult, false, true}, // differentResult is different since there are enough differences that // it isn't even considered similar. {differentResult, false, false}, // Zero value is always equal. {Result{NumSame: 0, NumDiff: 0}, true, true}, // Binary comparisons (where NumSame+NumDiff == 1) are always similar. {Result{NumSame: 1, NumDiff: 0}, true, true}, {Result{NumSame: 0, NumDiff: 1}, false, true}, // More complex ratios. The exact ratio for similarity may change, // and may require updates to these test cases. 
{Result{NumSame: 1, NumDiff: 1}, false, true}, {Result{NumSame: 1, NumDiff: 2}, false, true}, {Result{NumSame: 1, NumDiff: 3}, false, false}, {Result{NumSame: 2, NumDiff: 1}, false, true}, {Result{NumSame: 2, NumDiff: 2}, false, true}, {Result{NumSame: 2, NumDiff: 3}, false, true}, {Result{NumSame: 3, NumDiff: 1}, false, true}, {Result{NumSame: 3, NumDiff: 2}, false, true}, {Result{NumSame: 3, NumDiff: 3}, false, true}, {Result{NumSame: 1000, NumDiff: 0}, true, true}, {Result{NumSame: 1000, NumDiff: 1}, false, true}, {Result{NumSame: 1000, NumDiff: 2}, false, true}, {Result{NumSame: 0, NumDiff: 1000}, false, false}, {Result{NumSame: 1, NumDiff: 1000}, false, false}, {Result{NumSame: 2, NumDiff: 1000}, false, false}, } for _, tt := range tests { if got := tt.result.Equal(); got != tt.wantEqual { t.Errorf("%#v.Equal() = %v, want %v", tt.result, got, tt.wantEqual) } if got := tt.result.Similar(); got != tt.wantSimilar { t.Errorf("%#v.Similar() = %v, want %v", tt.result, got, tt.wantSimilar) } } } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/internal/diff/debug_enable.go0000644000000000000000000000776215024302467027355 0ustar rootroot// Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build cmp_debug // +build cmp_debug package diff import ( "fmt" "strings" "sync" "time" ) // The algorithm can be seen running in real-time by enabling debugging: // go test -tags=cmp_debug -v // // Example output: // === RUN TestDifference/#34 // ┌───────────────────────────────┐ // │ \ · · · · · · · · · · · · · · │ // │ · # · · · · · · · · · · · · · │ // │ · \ · · · · · · · · · · · · · │ // │ · · \ · · · · · · · · · · · · │ // │ · · · X # · · · · · · · · · · │ // │ · · · # \ · · · · · · · · · · │ // │ · · · · · # # · · · · · · · · │ // │ · · · · · # \ · · · · · · · · │ // │ · · · · · · · \ · · · · · · · │ // │ · · · · · · · · \ · · · · · · │ // │ · · · · · · · · · \ · · · · · │ // │ · · · · · · · · · · \ · · # · │ // │ · · · · · · · · · · · \ # # · │ // │ · · · · · · · · · · · # # # · │ // │ · · · · · · · · · · # # # # · │ // │ · · · · · · · · · # # # # # · │ // │ · · · · · · · · · · · · · · \ │ // └───────────────────────────────┘ // [.Y..M.XY......YXYXY.|] // // The grid represents the edit-graph where the horizontal axis represents // list X and the vertical axis represents list Y. The start of the two lists // is the top-left, while the ends are the bottom-right. The '·' represents // an unexplored node in the graph. The '\' indicates that the two symbols // from list X and Y are equal. The 'X' indicates that two symbols are similar // (but not exactly equal) to each other. The '#' indicates that the two symbols // are different (and not similar). The algorithm traverses this graph trying to // make the paths starting in the top-left and the bottom-right connect. // // The series of '.', 'X', 'Y', and 'M' characters at the bottom represents // the currently established path from the forward and reverse searches, // separated by a '|' character. 
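//
// As a rough reading of that path string (consistent with the EditScript
// string encoding used by this package, not additional debug output): '.'
// means the symbols at the current positions of X and Y are identical, 'X'
// means a symbol unique to list X is removed, 'Y' means a symbol unique to
// list Y is inserted, and 'M' means the two symbols are similar but not
// equal (Modified).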
const ( updateDelay = 100 * time.Millisecond finishDelay = 500 * time.Millisecond ansiTerminal = true // ANSI escape codes used to move terminal cursor ) var debug debugger type debugger struct { sync.Mutex p1, p2 EditScript fwdPath, revPath *EditScript grid []byte lines int } func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc { dbg.Lock() dbg.fwdPath, dbg.revPath = p1, p2 top := "┌─" + strings.Repeat("──", nx) + "┐\n" row := "│ " + strings.Repeat("· ", nx) + "│\n" btm := "└─" + strings.Repeat("──", nx) + "┘\n" dbg.grid = []byte(top + strings.Repeat(row, ny) + btm) dbg.lines = strings.Count(dbg.String(), "\n") fmt.Print(dbg) // Wrap the EqualFunc so that we can intercept each result. return func(ix, iy int) (r Result) { cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")] for i := range cell { cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot } switch r = f(ix, iy); { case r.Equal(): cell[0] = '\\' case r.Similar(): cell[0] = 'X' default: cell[0] = '#' } return } } func (dbg *debugger) Update() { dbg.print(updateDelay) } func (dbg *debugger) Finish() { dbg.print(finishDelay) dbg.Unlock() } func (dbg *debugger) String() string { dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0] for i := len(*dbg.revPath) - 1; i >= 0; i-- { dbg.p2 = append(dbg.p2, (*dbg.revPath)[i]) } return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2) } func (dbg *debugger) print(d time.Duration) { if ansiTerminal { fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor } fmt.Print(dbg) time.Sleep(d) } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/internal/value/0000755000000000000000000000000015024302467024622 5ustar rootrootdependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/internal/value/name.go0000644000000000000000000001017615024302467026076 0ustar rootroot// Copyright 2020, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package value import ( "reflect" "strconv" ) var anyType = reflect.TypeOf((*interface{})(nil)).Elem() // TypeString is nearly identical to reflect.Type.String, // but has an additional option to specify that full type names be used. func TypeString(t reflect.Type, qualified bool) string { return string(appendTypeName(nil, t, qualified, false)) } func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte { // BUG: Go reflection provides no way to disambiguate two named types // of the same name and within the same package, // but declared within the namespace of different functions. // Use the "any" alias instead of "interface{}" for better readability. if t == anyType { return append(b, "any"...) } // Named type. if t.Name() != "" { if qualified && t.PkgPath() != "" { b = append(b, '"') b = append(b, t.PkgPath()...) b = append(b, '"') b = append(b, '.') b = append(b, t.Name()...) } else { b = append(b, t.String()...) } return b } // Unnamed type. switch k := t.Kind(); k { case reflect.Bool, reflect.String, reflect.UnsafePointer, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: b = append(b, k.String()...) case reflect.Chan: if t.ChanDir() == reflect.RecvDir { b = append(b, "<-"...) } b = append(b, "chan"...) if t.ChanDir() == reflect.SendDir { b = append(b, "<-"...) 
} b = append(b, ' ') b = appendTypeName(b, t.Elem(), qualified, false) case reflect.Func: if !elideFunc { b = append(b, "func"...) } b = append(b, '(') for i := 0; i < t.NumIn(); i++ { if i > 0 { b = append(b, ", "...) } if i == t.NumIn()-1 && t.IsVariadic() { b = append(b, "..."...) b = appendTypeName(b, t.In(i).Elem(), qualified, false) } else { b = appendTypeName(b, t.In(i), qualified, false) } } b = append(b, ')') switch t.NumOut() { case 0: // Do nothing case 1: b = append(b, ' ') b = appendTypeName(b, t.Out(0), qualified, false) default: b = append(b, " ("...) for i := 0; i < t.NumOut(); i++ { if i > 0 { b = append(b, ", "...) } b = appendTypeName(b, t.Out(i), qualified, false) } b = append(b, ')') } case reflect.Struct: b = append(b, "struct{ "...) for i := 0; i < t.NumField(); i++ { if i > 0 { b = append(b, "; "...) } sf := t.Field(i) if !sf.Anonymous { if qualified && sf.PkgPath != "" { b = append(b, '"') b = append(b, sf.PkgPath...) b = append(b, '"') b = append(b, '.') } b = append(b, sf.Name...) b = append(b, ' ') } b = appendTypeName(b, sf.Type, qualified, false) if sf.Tag != "" { b = append(b, ' ') b = strconv.AppendQuote(b, string(sf.Tag)) } } if b[len(b)-1] == ' ' { b = b[:len(b)-1] } else { b = append(b, ' ') } b = append(b, '}') case reflect.Slice, reflect.Array: b = append(b, '[') if k == reflect.Array { b = strconv.AppendUint(b, uint64(t.Len()), 10) } b = append(b, ']') b = appendTypeName(b, t.Elem(), qualified, false) case reflect.Map: b = append(b, "map["...) b = appendTypeName(b, t.Key(), qualified, false) b = append(b, ']') b = appendTypeName(b, t.Elem(), qualified, false) case reflect.Ptr: b = append(b, '*') b = appendTypeName(b, t.Elem(), qualified, false) case reflect.Interface: b = append(b, "interface{ "...) for i := 0; i < t.NumMethod(); i++ { if i > 0 { b = append(b, "; "...) } m := t.Method(i) if qualified && m.PkgPath != "" { b = append(b, '"') b = append(b, m.PkgPath...) b = append(b, '"') b = append(b, '.') } b = append(b, m.Name...) b = appendTypeName(b, m.Type, qualified, true) } if b[len(b)-1] == ' ' { b = b[:len(b)-1] } else { b = append(b, ' ') } b = append(b, '}') default: panic("invalid kind: " + k.String()) } return b } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/internal/value/sort_test.go0000644000000000000000000001126715024302467027206 0ustar rootroot// Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
package value_test import ( "math" "reflect" "testing" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/internal/value" ) func TestSortKeys(t *testing.T) { type ( MyString string MyArray [2]int MyStruct struct { A MyString B MyArray C chan float64 } EmptyStruct struct{} ) opts := []cmp.Option{ cmp.Comparer(func(x, y float64) bool { if math.IsNaN(x) && math.IsNaN(y) { return true } return x == y }), cmp.Comparer(func(x, y complex128) bool { rx, ix, ry, iy := real(x), imag(x), real(y), imag(y) if math.IsNaN(rx) && math.IsNaN(ry) { rx, ry = 0, 0 } if math.IsNaN(ix) && math.IsNaN(iy) { ix, iy = 0, 0 } return rx == ry && ix == iy }), cmp.Comparer(func(x, y chan bool) bool { return true }), cmp.Comparer(func(x, y chan int) bool { return true }), cmp.Comparer(func(x, y chan float64) bool { return true }), cmp.Comparer(func(x, y chan interface{}) bool { return true }), cmp.Comparer(func(x, y *int) bool { return true }), } tests := []struct { in map[interface{}]bool // Set of keys to sort want []interface{} }{{ in: map[interface{}]bool{1: true, 2: true, 3: true}, want: []interface{}{1, 2, 3}, }, { in: map[interface{}]bool{ nil: true, true: true, false: true, -5: true, -55: true, -555: true, uint(1): true, uint(11): true, uint(111): true, "abc": true, "abcd": true, "abcde": true, "foo": true, "bar": true, MyString("abc"): true, MyString("abcd"): true, MyString("abcde"): true, new(int): true, new(int): true, make(chan bool): true, make(chan bool): true, make(chan int): true, make(chan interface{}): true, math.Inf(+1): true, math.Inf(-1): true, 1.2345: true, 12.345: true, 123.45: true, 1234.5: true, 0 + 0i: true, 1 + 0i: true, 2 + 0i: true, 0 + 1i: true, 0 + 2i: true, 0 + 3i: true, [2]int{2, 3}: true, [2]int{4, 0}: true, [2]int{2, 4}: true, MyArray([2]int{2, 4}): true, EmptyStruct{}: true, MyStruct{ "bravo", [2]int{2, 3}, make(chan float64), }: true, MyStruct{ "alpha", [2]int{3, 3}, make(chan float64), }: true, }, want: []interface{}{ nil, false, true, -555, -55, -5, uint(1), uint(11), uint(111), math.Inf(-1), 1.2345, 12.345, 123.45, 1234.5, math.Inf(+1), (0 + 0i), (0 + 1i), (0 + 2i), (0 + 3i), (1 + 0i), (2 + 0i), [2]int{2, 3}, [2]int{2, 4}, [2]int{4, 0}, MyArray([2]int{2, 4}), make(chan bool), make(chan bool), make(chan int), make(chan interface{}), new(int), new(int), "abc", "abcd", "abcde", "bar", "foo", MyString("abc"), MyString("abcd"), MyString("abcde"), EmptyStruct{}, MyStruct{"alpha", [2]int{3, 3}, make(chan float64)}, MyStruct{"bravo", [2]int{2, 3}, make(chan float64)}, }, }, { // NaN values cannot be properly deduplicated. // This is okay since map entries with NaN in the keys cannot be // retrieved anyways. in: map[interface{}]bool{ math.NaN(): true, math.NaN(): true, complex(0, math.NaN()): true, complex(0, math.NaN()): true, complex(math.NaN(), 0): true, complex(math.NaN(), 0): true, complex(math.NaN(), math.NaN()): true, }, want: []interface{}{ math.NaN(), complex(math.NaN(), math.NaN()), complex(math.NaN(), 0), complex(0, math.NaN()), }, }} for i, tt := range tests { // Intentionally pass the map via an unexported field to detect panics. // Unfortunately, we cannot actually test the keys without using unsafe. v := reflect.ValueOf(struct{ x map[interface{}]bool }{tt.in}).Field(0) value.SortKeys(append(v.MapKeys(), v.MapKeys()...)) // Try again, with keys that have read-write access in reflect. v = reflect.ValueOf(tt.in) keys := append(v.MapKeys(), v.MapKeys()...) 
var got []interface{} for _, k := range value.SortKeys(keys) { got = append(got, k.Interface()) } if d := cmp.Diff(got, tt.want, opts...); d != "" { t.Errorf("test %d, Sort() mismatch (-got +want):\n%s", i, d) } } } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/internal/value/sort.go0000644000000000000000000000604715024302467026147 0ustar rootroot// Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package value import ( "fmt" "math" "reflect" "sort" ) // SortKeys sorts a list of map keys, deduplicating keys if necessary. // The type of each value must be comparable. func SortKeys(vs []reflect.Value) []reflect.Value { if len(vs) == 0 { return vs } // Sort the map keys. sort.SliceStable(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) }) // Deduplicate keys (fails for NaNs). vs2 := vs[:1] for _, v := range vs[1:] { if isLess(vs2[len(vs2)-1], v) { vs2 = append(vs2, v) } } return vs2 } // isLess is a generic function for sorting arbitrary map keys. // The inputs must be of the same type and must be comparable. func isLess(x, y reflect.Value) bool { switch x.Type().Kind() { case reflect.Bool: return !x.Bool() && y.Bool() case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return x.Int() < y.Int() case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return x.Uint() < y.Uint() case reflect.Float32, reflect.Float64: // NOTE: This does not sort -0 as less than +0 // since Go maps treat -0 and +0 as equal keys. fx, fy := x.Float(), y.Float() return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy) case reflect.Complex64, reflect.Complex128: cx, cy := x.Complex(), y.Complex() rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy) if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) { return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy) } return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry) case reflect.Ptr, reflect.UnsafePointer, reflect.Chan: return x.Pointer() < y.Pointer() case reflect.String: return x.String() < y.String() case reflect.Array: for i := 0; i < x.Len(); i++ { if isLess(x.Index(i), y.Index(i)) { return true } if isLess(y.Index(i), x.Index(i)) { return false } } return false case reflect.Struct: for i := 0; i < x.NumField(); i++ { if isLess(x.Field(i), y.Field(i)) { return true } if isLess(y.Field(i), x.Field(i)) { return false } } return false case reflect.Interface: vx, vy := x.Elem(), y.Elem() if !vx.IsValid() || !vy.IsValid() { return !vx.IsValid() && vy.IsValid() } tx, ty := vx.Type(), vy.Type() if tx == ty { return isLess(x.Elem(), y.Elem()) } if tx.Kind() != ty.Kind() { return vx.Kind() < vy.Kind() } if tx.String() != ty.String() { return tx.String() < ty.String() } if tx.PkgPath() != ty.PkgPath() { return tx.PkgPath() < ty.PkgPath() } // This can happen in rare situations, so we fallback to just comparing // the unique pointer for a reflect.Type. This guarantees deterministic // ordering within a program, but it is obviously not stable. return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer() default: // Must be Func, Map, or Slice; which are not comparable. panic(fmt.Sprintf("%T is not comparable", x.Type())) } } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/internal/value/name_test.go0000644000000000000000000000663015024302467027135 0ustar rootroot// Copyright 2020, The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package value import ( "reflect" "strings" "testing" ) type Named struct{} var pkgPath = reflect.TypeOf(Named{}).PkgPath() func TestTypeString(t *testing.T) { tests := []struct { in interface{} want string }{{ in: bool(false), want: "bool", }, { in: int(0), want: "int", }, { in: float64(0), want: "float64", }, { in: string(""), want: "string", }, { in: Named{}, want: "$PackagePath.Named", }, { in: (chan Named)(nil), want: "chan $PackagePath.Named", }, { in: (<-chan Named)(nil), want: "<-chan $PackagePath.Named", }, { in: (chan<- Named)(nil), want: "chan<- $PackagePath.Named", }, { in: (func())(nil), want: "func()", }, { in: (func(Named))(nil), want: "func($PackagePath.Named)", }, { in: (func() Named)(nil), want: "func() $PackagePath.Named", }, { in: (func(int, Named) (int, error))(nil), want: "func(int, $PackagePath.Named) (int, error)", }, { in: (func(...Named))(nil), want: "func(...$PackagePath.Named)", }, { in: struct{}{}, want: "struct{}", }, { in: struct{ Named }{}, want: "struct{ $PackagePath.Named }", }, { in: struct { Named `tag` }{}, want: "struct{ $PackagePath.Named \"tag\" }", }, { in: struct{ Named Named }{}, want: "struct{ Named $PackagePath.Named }", }, { in: struct { Named Named `tag` }{}, want: "struct{ Named $PackagePath.Named \"tag\" }", }, { in: struct { Int int Named Named }{}, want: "struct{ Int int; Named $PackagePath.Named }", }, { in: struct { _ int x Named }{}, want: "struct{ $FieldPrefix._ int; $FieldPrefix.x $PackagePath.Named }", }, { in: []Named(nil), want: "[]$PackagePath.Named", }, { in: []*Named(nil), want: "[]*$PackagePath.Named", }, { in: [10]Named{}, want: "[10]$PackagePath.Named", }, { in: [10]*Named{}, want: "[10]*$PackagePath.Named", }, { in: map[string]string(nil), want: "map[string]string", }, { in: map[Named]Named(nil), want: "map[$PackagePath.Named]$PackagePath.Named", }, { in: (*Named)(nil), want: "*$PackagePath.Named", }, { in: (*interface{})(nil), want: "*any", }, { in: (*interface{ Read([]byte) (int, error) })(nil), want: "*interface{ Read([]uint8) (int, error) }", }, { in: (*interface { F1() F2(Named) F3() Named F4(int, Named) (int, error) F5(...Named) })(nil), want: "*interface{ F1(); F2($PackagePath.Named); F3() $PackagePath.Named; F4(int, $PackagePath.Named) (int, error); F5(...$PackagePath.Named) }", }} for _, tt := range tests { typ := reflect.TypeOf(tt.in) wantShort := tt.want wantShort = strings.Replace(wantShort, "$PackagePath", "value", -1) wantShort = strings.Replace(wantShort, "$FieldPrefix.", "", -1) if gotShort := TypeString(typ, false); gotShort != wantShort { t.Errorf("TypeString(%v, false) mismatch:\ngot: %v\nwant: %v", typ, gotShort, wantShort) } wantQualified := tt.want wantQualified = strings.Replace(wantQualified, "$PackagePath", `"`+pkgPath+`"`, -1) wantQualified = strings.Replace(wantQualified, "$FieldPrefix", `"`+pkgPath+`"`, -1) if gotQualified := TypeString(typ, true); gotQualified != wantQualified { t.Errorf("TypeString(%v, true) mismatch:\ngot: %v\nwant: %v", typ, gotQualified, wantQualified) } } } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/internal/value/pointer.go0000644000000000000000000000155115024302467026633 0ustar rootroot// Copyright 2018, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package value import ( "reflect" "unsafe" ) // Pointer is an opaque typed pointer and is guaranteed to be comparable. 
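//
// A minimal usage sketch (relying only on the API defined in this file):
// because Pointer is comparable, it can be used directly as a map key:
//
//	var x int
//	seen := map[Pointer]bool{}
//	seen[PointerOf(reflect.ValueOf(&x))] = true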
type Pointer struct { p unsafe.Pointer t reflect.Type } // PointerOf returns a Pointer from v, which must be a // reflect.Ptr, reflect.Slice, or reflect.Map. func PointerOf(v reflect.Value) Pointer { // The proper representation of a pointer is unsafe.Pointer, // which is necessary if the GC ever uses a moving collector. return Pointer{unsafe.Pointer(v.Pointer()), v.Type()} } // IsNil reports whether the pointer is nil. func (p Pointer) IsNil() bool { return p.p == nil } // Uintptr returns the pointer as a uintptr. func (p Pointer) Uintptr() uintptr { return uintptr(p.p) } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/internal/teststructs/0000755000000000000000000000000015024302467026115 5ustar rootrootdependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/internal/teststructs/project4.go0000644000000000000000000001321015024302467030173 0ustar rootroot// Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package teststructs import ( "time" pb "github.com/google/go-cmp/cmp/internal/testprotos" ) // This is an sanitized example of equality from a real use-case. // The original equality function was as follows: /* func equalCartel(x, y Cartel) bool { if !(equalHeadquarter(x.Headquarter, y.Headquarter) && x.Source() == y.Source() && x.CreationDate().Equal(y.CreationDate()) && x.Boss() == y.Boss() && x.LastCrimeDate().Equal(y.LastCrimeDate())) { return false } if len(x.Poisons()) != len(y.Poisons()) { return false } for i := range x.Poisons() { if !equalPoison(*x.Poisons()[i], *y.Poisons()[i]) { return false } } return true } func equalHeadquarter(x, y Headquarter) bool { xr, yr := x.Restrictions(), y.Restrictions() return x.ID() == y.ID() && x.Location() == y.Location() && reflect.DeepEqual(x.SubDivisions(), y.SubDivisions()) && x.IncorporatedDate().Equal(y.IncorporatedDate()) && pb.Equal(x.MetaData(), y.MetaData()) && bytes.Equal(x.PrivateMessage(), y.PrivateMessage()) && bytes.Equal(x.PublicMessage(), y.PublicMessage()) && x.HorseBack() == y.HorseBack() && x.Rattle() == y.Rattle() && x.Convulsion() == y.Convulsion() && x.Expansion() == y.Expansion() && x.Status() == y.Status() && pb.Equal(&xr, &yr) && x.CreationTime().Equal(y.CreationTime()) } func equalPoison(x, y Poison) bool { return x.PoisonType() == y.PoisonType() && x.Expiration().Equal(y.Expiration()) && x.Manufacturer() == y.Manufacturer() && x.Potency() == y.Potency() } */ type Cartel struct { Headquarter source string creationDate time.Time boss string lastCrimeDate time.Time poisons []*Poison } func (p Cartel) Source() string { return p.source } func (p Cartel) CreationDate() time.Time { return p.creationDate } func (p Cartel) Boss() string { return p.boss } func (p Cartel) LastCrimeDate() time.Time { return p.lastCrimeDate } func (p Cartel) Poisons() []*Poison { return p.poisons } func (p *Cartel) SetSource(x string) { p.source = x } func (p *Cartel) SetCreationDate(x time.Time) { p.creationDate = x } func (p *Cartel) SetBoss(x string) { p.boss = x } func (p *Cartel) SetLastCrimeDate(x time.Time) { p.lastCrimeDate = x } func (p *Cartel) SetPoisons(x []*Poison) { p.poisons = x } type Headquarter struct { id uint64 location string subDivisions []string incorporatedDate time.Time metaData *pb.MetaData privateMessage []byte publicMessage []byte horseBack string rattle string convulsion bool expansion uint64 status pb.HoneyStatus restrictions pb.Restrictions creationTime time.Time } func (hq Headquarter) ID() uint64 { 
return hq.id } func (hq Headquarter) Location() string { return hq.location } func (hq Headquarter) SubDivisions() []string { return hq.subDivisions } func (hq Headquarter) IncorporatedDate() time.Time { return hq.incorporatedDate } func (hq Headquarter) MetaData() *pb.MetaData { return hq.metaData } func (hq Headquarter) PrivateMessage() []byte { return hq.privateMessage } func (hq Headquarter) PublicMessage() []byte { return hq.publicMessage } func (hq Headquarter) HorseBack() string { return hq.horseBack } func (hq Headquarter) Rattle() string { return hq.rattle } func (hq Headquarter) Convulsion() bool { return hq.convulsion } func (hq Headquarter) Expansion() uint64 { return hq.expansion } func (hq Headquarter) Status() pb.HoneyStatus { return hq.status } func (hq Headquarter) Restrictions() pb.Restrictions { return hq.restrictions } func (hq Headquarter) CreationTime() time.Time { return hq.creationTime } func (hq *Headquarter) SetID(x uint64) { hq.id = x } func (hq *Headquarter) SetLocation(x string) { hq.location = x } func (hq *Headquarter) SetSubDivisions(x []string) { hq.subDivisions = x } func (hq *Headquarter) SetIncorporatedDate(x time.Time) { hq.incorporatedDate = x } func (hq *Headquarter) SetMetaData(x *pb.MetaData) { hq.metaData = x } func (hq *Headquarter) SetPrivateMessage(x []byte) { hq.privateMessage = x } func (hq *Headquarter) SetPublicMessage(x []byte) { hq.publicMessage = x } func (hq *Headquarter) SetHorseBack(x string) { hq.horseBack = x } func (hq *Headquarter) SetRattle(x string) { hq.rattle = x } func (hq *Headquarter) SetConvulsion(x bool) { hq.convulsion = x } func (hq *Headquarter) SetExpansion(x uint64) { hq.expansion = x } func (hq *Headquarter) SetStatus(x pb.HoneyStatus) { hq.status = x } func (hq *Headquarter) SetRestrictions(x pb.Restrictions) { hq.restrictions = x } func (hq *Headquarter) SetCreationTime(x time.Time) { hq.creationTime = x } type Poison struct { poisonType pb.PoisonType expiration time.Time manufacturer string potency int } func (p Poison) PoisonType() pb.PoisonType { return p.poisonType } func (p Poison) Expiration() time.Time { return p.expiration } func (p Poison) Manufacturer() string { return p.manufacturer } func (p Poison) Potency() int { return p.potency } func (p *Poison) SetPoisonType(x pb.PoisonType) { p.poisonType = x } func (p *Poison) SetExpiration(x time.Time) { p.expiration = x } func (p *Poison) SetManufacturer(x string) { p.manufacturer = x } func (p *Poison) SetPotency(x int) { p.potency = x } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/internal/teststructs/structs.go0000644000000000000000000001216515024302467030160 0ustar rootroot// Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package teststructs type InterfaceA interface { InterfaceA() } type ( StructA struct{ X string } // Equal method on value receiver StructB struct{ X string } // Equal method on pointer receiver StructC struct{ X string } // Equal method (with interface argument) on value receiver StructD struct{ X string } // Equal method (with interface argument) on pointer receiver StructE struct{ X string } // Equal method (with interface argument on value receiver) on pointer receiver StructF struct{ X string } // Equal method (with interface argument on pointer receiver) on value receiver // These embed the above types as a value. 
StructA1 struct { StructA X string } StructB1 struct { StructB X string } StructC1 struct { StructC X string } StructD1 struct { StructD X string } StructE1 struct { StructE X string } StructF1 struct { StructF X string } // These embed the above types as a pointer. StructA2 struct { *StructA X string } StructB2 struct { *StructB X string } StructC2 struct { *StructC X string } StructD2 struct { *StructD X string } StructE2 struct { *StructE X string } StructF2 struct { *StructF X string } StructNo struct{ X string } // Equal method (with interface argument) on non-satisfying receiver AssignA func() int AssignB struct{ A int } AssignC chan bool AssignD <-chan bool ) func (x StructA) Equal(y StructA) bool { return true } func (x *StructB) Equal(y *StructB) bool { return true } func (x StructC) Equal(y InterfaceA) bool { return true } func (x StructC) InterfaceA() {} func (x *StructD) Equal(y InterfaceA) bool { return true } func (x *StructD) InterfaceA() {} func (x *StructE) Equal(y InterfaceA) bool { return true } func (x StructE) InterfaceA() {} func (x StructF) Equal(y InterfaceA) bool { return true } func (x *StructF) InterfaceA() {} func (x StructNo) Equal(y InterfaceA) bool { return true } func (x AssignA) Equal(y func() int) bool { return true } func (x AssignB) Equal(y struct{ A int }) bool { return true } func (x AssignC) Equal(y chan bool) bool { return true } func (x AssignD) Equal(y <-chan bool) bool { return true } var _ = func( a StructA, b StructB, c StructC, d StructD, e StructE, f StructF, ap *StructA, bp *StructB, cp *StructC, dp *StructD, ep *StructE, fp *StructF, a1 StructA1, b1 StructB1, c1 StructC1, d1 StructD1, e1 StructE1, f1 StructF1, a2 StructA2, b2 StructB2, c2 StructC2, d2 StructD2, e2 StructE2, f2 StructF1, ) { a.Equal(a) b.Equal(&b) c.Equal(c) d.Equal(&d) e.Equal(e) f.Equal(&f) ap.Equal(*ap) bp.Equal(bp) cp.Equal(*cp) dp.Equal(dp) ep.Equal(*ep) fp.Equal(fp) a1.Equal(a1.StructA) b1.Equal(&b1.StructB) c1.Equal(c1) d1.Equal(&d1) e1.Equal(e1) f1.Equal(&f1) a2.Equal(*a2.StructA) b2.Equal(b2.StructB) c2.Equal(c2) d2.Equal(&d2) e2.Equal(e2) f2.Equal(&f2) } type ( privateStruct struct{ Public, private int } PublicStruct struct{ Public, private int } ParentStructA struct{ privateStruct } ParentStructB struct{ PublicStruct } ParentStructC struct { privateStruct Public, private int } ParentStructD struct { PublicStruct Public, private int } ParentStructE struct { privateStruct PublicStruct } ParentStructF struct { privateStruct PublicStruct Public, private int } ParentStructG struct { *privateStruct } ParentStructH struct { *PublicStruct } ParentStructI struct { *privateStruct *PublicStruct } ParentStructJ struct { *privateStruct *PublicStruct Public PublicStruct private privateStruct } ) func NewParentStructG() *ParentStructG { return &ParentStructG{new(privateStruct)} } func NewParentStructH() *ParentStructH { return &ParentStructH{new(PublicStruct)} } func NewParentStructI() *ParentStructI { return &ParentStructI{new(privateStruct), new(PublicStruct)} } func NewParentStructJ() *ParentStructJ { return &ParentStructJ{ privateStruct: new(privateStruct), PublicStruct: new(PublicStruct), } } func (s *privateStruct) SetPrivate(i int) { s.private = i } func (s *PublicStruct) SetPrivate(i int) { s.private = i } func (s *ParentStructC) SetPrivate(i int) { s.private = i } func (s *ParentStructD) SetPrivate(i int) { s.private = i } func (s *ParentStructF) SetPrivate(i int) { s.private = i } func (s *ParentStructA) PrivateStruct() *privateStruct { return &s.privateStruct } func (s 
*ParentStructC) PrivateStruct() *privateStruct { return &s.privateStruct } func (s *ParentStructE) PrivateStruct() *privateStruct { return &s.privateStruct } func (s *ParentStructF) PrivateStruct() *privateStruct { return &s.privateStruct } func (s *ParentStructG) PrivateStruct() *privateStruct { return s.privateStruct } func (s *ParentStructI) PrivateStruct() *privateStruct { return s.privateStruct } func (s *ParentStructJ) PrivateStruct() *privateStruct { return s.privateStruct } func (s *ParentStructJ) Private() *privateStruct { return &s.private } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/internal/teststructs/foo2/0000755000000000000000000000000015024302467026762 5ustar rootrootdependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/internal/teststructs/foo2/foo.go0000644000000000000000000000061715024302467030100 0ustar rootroot// Copyright 2020, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package foo is deliberately named differently than the parent directory. // It contain declarations that have ambiguity in their short names, // relative to a different package also called foo. package foo type Bar struct{ S string } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/internal/teststructs/project2.go0000644000000000000000000000354515024302467030203 0ustar rootroot// Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package teststructs import ( "time" pb "github.com/google/go-cmp/cmp/internal/testprotos" ) // This is an sanitized example of equality from a real use-case. // The original equality function was as follows: /* func equalBatch(b1, b2 *GermBatch) bool { for _, b := range []*GermBatch{b1, b2} { for _, l := range b.DirtyGerms { sort.Slice(l, func(i, j int) bool { return l[i].String() < l[j].String() }) } for _, l := range b.CleanGerms { sort.Slice(l, func(i, j int) bool { return l[i].String() < l[j].String() }) } } if !pb.DeepEqual(b1.DirtyGerms, b2.DirtyGerms) || !pb.DeepEqual(b1.CleanGerms, b2.CleanGerms) || !pb.DeepEqual(b1.GermMap, b2.GermMap) { return false } if len(b1.DishMap) != len(b2.DishMap) { return false } for id := range b1.DishMap { kpb1, err1 := b1.DishMap[id].Proto() kpb2, err2 := b2.DishMap[id].Proto() if !pb.Equal(kpb1, kpb2) || !reflect.DeepEqual(err1, err2) { return false } } return b1.HasPreviousResult == b2.HasPreviousResult && b1.DirtyID == b2.DirtyID && b1.CleanID == b2.CleanID && b1.GermStrain == b2.GermStrain && b1.TotalDirtyGerms == b2.TotalDirtyGerms && b1.InfectedAt.Equal(b2.InfectedAt) } */ type GermBatch struct { DirtyGerms, CleanGerms map[int32][]*pb.Germ GermMap map[int32]*pb.Germ DishMap map[int32]*Dish HasPreviousResult bool DirtyID, CleanID int32 GermStrain int32 TotalDirtyGerms int InfectedAt time.Time } type Dish struct { pb *pb.Dish err error } func CreateDish(m *pb.Dish, err error) *Dish { return &Dish{pb: m, err: err} } func (d *Dish) Proto() (*pb.Dish, error) { if d.err != nil { return nil, d.err } return d.pb, nil } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/internal/teststructs/foo1/0000755000000000000000000000000015024302467026761 5ustar rootrootdependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/internal/teststructs/foo1/foo.go0000644000000000000000000000061715024302467030077 0ustar rootroot// Copyright 2020, The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package foo is deliberately named differently than the parent directory. // It contain declarations that have ambiguity in their short names, // relative to a different package also called foo. package foo type Bar struct{ S string } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/internal/teststructs/project3.go0000644000000000000000000000400115024302467030170 0ustar rootroot// Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package teststructs import ( "sync" pb "github.com/google/go-cmp/cmp/internal/testprotos" ) // This is an sanitized example of equality from a real use-case. // The original equality function was as follows: /* func equalDirt(x, y *Dirt) bool { if !reflect.DeepEqual(x.table, y.table) || !reflect.DeepEqual(x.ts, y.ts) || x.Discord != y.Discord || !pb.Equal(&x.Proto, &y.Proto) || len(x.wizard) != len(y.wizard) || len(x.sadistic) != len(y.sadistic) || x.lastTime != y.lastTime { return false } for k, vx := range x.wizard { vy, ok := y.wizard[k] if !ok || !pb.Equal(vx, vy) { return false } } for k, vx := range x.sadistic { vy, ok := y.sadistic[k] if !ok || !pb.Equal(vx, vy) { return false } } return true } */ type FakeMutex struct { sync.Locker x struct{} } type Dirt struct { table Table // Always concrete type of MockTable ts Timestamp Discord DiscordState Proto pb.Dirt wizard map[string]*pb.Wizard sadistic map[string]*pb.Sadistic lastTime int64 mu FakeMutex } type DiscordState int type Timestamp int64 func (d *Dirt) SetTable(t Table) { d.table = t } func (d *Dirt) SetTimestamp(t Timestamp) { d.ts = t } func (d *Dirt) SetWizard(m map[string]*pb.Wizard) { d.wizard = m } func (d *Dirt) SetSadistic(m map[string]*pb.Sadistic) { d.sadistic = m } func (d *Dirt) SetLastTime(t int64) { d.lastTime = t } type Table interface { Operation1() error Operation2() error Operation3() error } type MockTable struct { state []string } func CreateMockTable(s []string) *MockTable { return &MockTable{s} } func (mt *MockTable) Operation1() error { return nil } func (mt *MockTable) Operation2() error { return nil } func (mt *MockTable) Operation3() error { return nil } func (mt *MockTable) State() []string { return mt.state } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/internal/teststructs/project1.go0000644000000000000000000001327215024302467030200 0ustar rootroot// Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package teststructs import ( "time" pb "github.com/google/go-cmp/cmp/internal/testprotos" ) // This is an sanitized example of equality from a real use-case. 
// The original equality function was as follows: /* func equalEagle(x, y Eagle) bool { if x.Name != y.Name && !reflect.DeepEqual(x.Hounds, y.Hounds) && x.Desc != y.Desc && x.DescLong != y.DescLong && x.Prong != y.Prong && x.StateGoverner != y.StateGoverner && x.PrankRating != y.PrankRating && x.FunnyPrank != y.FunnyPrank && !pb.Equal(x.Immutable.Proto(), y.Immutable.Proto()) { return false } if len(x.Dreamers) != len(y.Dreamers) { return false } for i := range x.Dreamers { if !equalDreamer(x.Dreamers[i], y.Dreamers[i]) { return false } } if len(x.Slaps) != len(y.Slaps) { return false } for i := range x.Slaps { if !equalSlap(x.Slaps[i], y.Slaps[i]) { return false } } return true } func equalDreamer(x, y Dreamer) bool { if x.Name != y.Name || x.Desc != y.Desc || x.DescLong != y.DescLong || x.ContSlapsInterval != y.ContSlapsInterval || x.Ornamental != y.Ornamental || x.Amoeba != y.Amoeba || x.Heroes != y.Heroes || x.FloppyDisk != y.FloppyDisk || x.MightiestDuck != y.MightiestDuck || x.FunnyPrank != y.FunnyPrank || !pb.Equal(x.Immutable.Proto(), y.Immutable.Proto()) { return false } if len(x.Animal) != len(y.Animal) { return false } for i := range x.Animal { vx := x.Animal[i] vy := y.Animal[i] if reflect.TypeOf(x.Animal) != reflect.TypeOf(y.Animal) { return false } switch vx.(type) { case Goat: if !equalGoat(vx.(Goat), vy.(Goat)) { return false } case Donkey: if !equalDonkey(vx.(Donkey), vy.(Donkey)) { return false } default: panic(fmt.Sprintf("unknown type: %T", vx)) } } if len(x.PreSlaps) != len(y.PreSlaps) { return false } for i := range x.PreSlaps { if !equalSlap(x.PreSlaps[i], y.PreSlaps[i]) { return false } } if len(x.ContSlaps) != len(y.ContSlaps) { return false } for i := range x.ContSlaps { if !equalSlap(x.ContSlaps[i], y.ContSlaps[i]) { return false } } return true } func equalSlap(x, y Slap) bool { return x.Name == y.Name && x.Desc == y.Desc && x.DescLong == y.DescLong && pb.Equal(x.Args, y.Args) && x.Tense == y.Tense && x.Interval == y.Interval && x.Homeland == y.Homeland && x.FunnyPrank == y.FunnyPrank && pb.Equal(x.Immutable.Proto(), y.Immutable.Proto()) } func equalGoat(x, y Goat) bool { if x.Target != y.Target || x.FunnyPrank != y.FunnyPrank || !pb.Equal(x.Immutable.Proto(), y.Immutable.Proto()) { return false } if len(x.Slaps) != len(y.Slaps) { return false } for i := range x.Slaps { if !equalSlap(x.Slaps[i], y.Slaps[i]) { return false } } return true } func equalDonkey(x, y Donkey) bool { return x.Pause == y.Pause && x.Sleep == y.Sleep && x.FunnyPrank == y.FunnyPrank && pb.Equal(x.Immutable.Proto(), y.Immutable.Proto()) } */ type Eagle struct { Name string Hounds []string Desc string DescLong string Dreamers []Dreamer Prong int64 Slaps []Slap StateGoverner string PrankRating string FunnyPrank string Immutable *EagleImmutable } type EagleImmutable struct { ID string State *pb.Eagle_States MissingCall *pb.Eagle_MissingCalls Birthday time.Time Death time.Time Started time.Time LastUpdate time.Time Creator string empty bool } type Dreamer struct { Name string Desc string DescLong string PreSlaps []Slap ContSlaps []Slap ContSlapsInterval int32 Animal []interface{} // Could be either Goat or Donkey Ornamental bool Amoeba int64 Heroes int32 FloppyDisk int32 MightiestDuck bool FunnyPrank string Immutable *DreamerImmutable } type DreamerImmutable struct { ID string State *pb.Dreamer_States MissingCall *pb.Dreamer_MissingCalls Calls int32 Started time.Time Stopped time.Time LastUpdate time.Time empty bool } type Slap struct { Name string Desc string DescLong string Args pb.Message 
Tense int32 Interval int32 Homeland uint32 FunnyPrank string Immutable *SlapImmutable } type SlapImmutable struct { ID string Out pb.Message MildSlap bool PrettyPrint string State *pb.Slap_States Started time.Time Stopped time.Time LastUpdate time.Time LoveRadius *LoveRadius empty bool } type Goat struct { Target string Slaps []Slap FunnyPrank string Immutable *GoatImmutable } type GoatImmutable struct { ID string State *pb.Goat_States Started time.Time Stopped time.Time LastUpdate time.Time empty bool } type Donkey struct { Pause bool Sleep int32 FunnyPrank string Immutable *DonkeyImmutable } type DonkeyImmutable struct { ID string State *pb.Donkey_States Started time.Time Stopped time.Time LastUpdate time.Time empty bool } type LoveRadius struct { Summer *SummerLove empty bool } type SummerLove struct { Summary *SummerLoveSummary empty bool } type SummerLoveSummary struct { Devices []string ChangeType []pb.SummerType empty bool } func (EagleImmutable) Proto() *pb.Eagle { return nil } func (DreamerImmutable) Proto() *pb.Dreamer { return nil } func (SlapImmutable) Proto() *pb.Slap { return nil } func (GoatImmutable) Proto() *pb.Goat { return nil } func (DonkeyImmutable) Proto() *pb.Donkey { return nil } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/internal/flags/0000755000000000000000000000000015024302467024602 5ustar rootrootdependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/internal/flags/flags.go0000644000000000000000000000046715024302467026234 0ustar rootroot// Copyright 2019, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package flags // Deterministic controls whether the output of Diff should be deterministic. // This is only used for testing. var Deterministic bool dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/internal/function/0000755000000000000000000000000015024302467025333 5ustar rootrootdependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/internal/function/func_test.go0000644000000000000000000000304315024302467027654 0ustar rootroot// Copyright 2019, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
package function import ( "bytes" "reflect" "testing" ) type myType struct{ bytes.Buffer } func (myType) valueMethod() {} func (myType) ValueMethod() {} func (*myType) pointerMethod() {} func (*myType) PointerMethod() {} func TestNameOf(t *testing.T) { tests := []struct { fnc interface{} want string }{ {TestNameOf, "function.TestNameOf"}, {func() {}, "function.TestNameOf.func1"}, {(myType).valueMethod, "function.myType.valueMethod"}, {(myType).ValueMethod, "function.myType.ValueMethod"}, {(myType{}).valueMethod, "function.myType.valueMethod"}, {(myType{}).ValueMethod, "function.myType.ValueMethod"}, {(*myType).valueMethod, "function.myType.valueMethod"}, {(*myType).ValueMethod, "function.myType.ValueMethod"}, {(&myType{}).valueMethod, "function.myType.valueMethod"}, {(&myType{}).ValueMethod, "function.myType.ValueMethod"}, {(*myType).pointerMethod, "function.myType.pointerMethod"}, {(*myType).PointerMethod, "function.myType.PointerMethod"}, {(&myType{}).pointerMethod, "function.myType.pointerMethod"}, {(&myType{}).PointerMethod, "function.myType.PointerMethod"}, {(*myType).Write, "function.myType.Write"}, {(&myType{}).Write, "bytes.Buffer.Write"}, } for _, tt := range tests { t.Run("", func(t *testing.T) { got := NameOf(reflect.ValueOf(tt.fnc)) if got != tt.want { t.Errorf("NameOf() = %v, want %v", got, tt.want) } }) } } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/internal/function/func.go0000644000000000000000000000537015024302467026622 0ustar rootroot// Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package function provides functionality for identifying function types. package function import ( "reflect" "regexp" "runtime" "strings" ) type funcType int const ( _ funcType = iota tbFunc // func(T) bool ttbFunc // func(T, T) bool ttiFunc // func(T, T) int trbFunc // func(T, R) bool tibFunc // func(T, I) bool trFunc // func(T) R Equal = ttbFunc // func(T, T) bool EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool Transformer = trFunc // func(T) R ValueFilter = ttbFunc // func(T, T) bool Less = ttbFunc // func(T, T) bool Compare = ttiFunc // func(T, T) int ValuePredicate = tbFunc // func(T) bool KeyValuePredicate = trbFunc // func(T, R) bool ) var boolType = reflect.TypeOf(true) var intType = reflect.TypeOf(0) // IsType reports whether the reflect.Type is of the specified function type. func IsType(t reflect.Type, ft funcType) bool { if t == nil || t.Kind() != reflect.Func || t.IsVariadic() { return false } ni, no := t.NumIn(), t.NumOut() switch ft { case tbFunc: // func(T) bool if ni == 1 && no == 1 && t.Out(0) == boolType { return true } case ttbFunc: // func(T, T) bool if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType { return true } case ttiFunc: // func(T, T) int if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == intType { return true } case trbFunc: // func(T, R) bool if ni == 2 && no == 1 && t.Out(0) == boolType { return true } case tibFunc: // func(T, I) bool if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType { return true } case trFunc: // func(T) R if ni == 1 && no == 1 { return true } } return false } var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`) // NameOf returns the name of the function value. 
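//
// A rough sketch of typical results (assumed for illustration; the exact
// string depends on how the function value was obtained):
//
//	NameOf(reflect.ValueOf(strings.TrimSpace))   // "strings.TrimSpace"
//	NameOf(reflect.ValueOf((*bytes.Buffer).Len)) // "bytes.Buffer.Len"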
func NameOf(v reflect.Value) string { fnc := runtime.FuncForPC(v.Pointer()) if fnc == nil { return "" } fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm" // Method closures have a "-fm" suffix. fullName = strings.TrimSuffix(fullName, "-fm") var name string for len(fullName) > 0 { inParen := strings.HasSuffix(fullName, ")") fullName = strings.TrimSuffix(fullName, ")") s := lastIdentRx.FindString(fullName) if s == "" { break } name = s + "." + name fullName = strings.TrimSuffix(fullName, s) if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 { fullName = fullName[:i] } fullName = strings.TrimSuffix(fullName, ".") } return strings.TrimSuffix(name, ".") } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/internal/testprotos/0000755000000000000000000000000015024302467025734 5ustar rootrootdependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/internal/testprotos/protos.go0000644000000000000000000000307715024302467027620 0ustar rootroot// Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package testprotos func Equal(x, y Message) bool { if x == nil || y == nil { return x == nil && y == nil } return x.String() == y.String() } type Message interface { Proto() String() string } type proto interface { Proto() } type notComparable struct { unexportedField func() } type Stringer struct{ X string } func (s *Stringer) String() string { return s.X } // Project1 protocol buffers type ( Eagle_States int Eagle_MissingCalls int Dreamer_States int Dreamer_MissingCalls int Slap_States int Goat_States int Donkey_States int SummerType int Eagle struct { proto notComparable Stringer } Dreamer struct { proto notComparable Stringer } Slap struct { proto notComparable Stringer } Goat struct { proto notComparable Stringer } Donkey struct { proto notComparable Stringer } ) // Project2 protocol buffers type ( Germ struct { proto notComparable Stringer } Dish struct { proto notComparable Stringer } ) // Project3 protocol buffers type ( Dirt struct { proto notComparable Stringer } Wizard struct { proto notComparable Stringer } Sadistic struct { proto notComparable Stringer } ) // Project4 protocol buffers type ( HoneyStatus int PoisonType int MetaData struct { proto notComparable Stringer } Restrictions struct { proto notComparable Stringer } ) dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/report_text.go0000644000000000000000000002675715024302467024621 0ustar rootroot// Copyright 2019, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package cmp import ( "bytes" "fmt" "math/rand" "strings" "time" "unicode/utf8" "github.com/google/go-cmp/cmp/internal/flags" ) var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 const maxColumnLength = 80 type indentMode int func (n indentMode) appendIndent(b []byte, d diffMode) []byte { // The output of Diff is documented as being unstable to provide future // flexibility in changing the output for more humanly readable reports. // This logic intentionally introduces instability to the exact output // so that users can detect accidental reliance on stability early on, // rather than much later when an actual change to the format occurs. if flags.Deterministic || randBool { // Use regular spaces (U+0020). switch d { case diffUnknown, diffIdentical: b = append(b, " "...) 
case diffRemoved: b = append(b, "- "...) case diffInserted: b = append(b, "+ "...) } } else { // Use non-breaking spaces (U+00a0). switch d { case diffUnknown, diffIdentical: b = append(b, "  "...) case diffRemoved: b = append(b, "- "...) case diffInserted: b = append(b, "+ "...) } } return repeatCount(n).appendChar(b, '\t') } type repeatCount int func (n repeatCount) appendChar(b []byte, c byte) []byte { for ; n > 0; n-- { b = append(b, c) } return b } // textNode is a simplified tree-based representation of structured text. // Possible node types are textWrap, textList, or textLine. type textNode interface { // Len reports the length in bytes of a single-line version of the tree. // Nested textRecord.Diff and textRecord.Comment fields are ignored. Len() int // Equal reports whether the two trees are structurally identical. // Nested textRecord.Diff and textRecord.Comment fields are compared. Equal(textNode) bool // String returns the string representation of the text tree. // It is not guaranteed that len(x.String()) == x.Len(), // nor that x.String() == y.String() implies that x.Equal(y). String() string // formatCompactTo formats the contents of the tree as a single-line string // to the provided buffer. Any nested textRecord.Diff and textRecord.Comment // fields are ignored. // // However, not all nodes in the tree should be collapsed as a single-line. // If a node can be collapsed as a single-line, it is replaced by a textLine // node. Since the top-level node cannot replace itself, this also returns // the current node itself. // // This does not mutate the receiver. formatCompactTo([]byte, diffMode) ([]byte, textNode) // formatExpandedTo formats the contents of the tree as a multi-line string // to the provided buffer. In order for column alignment to operate well, // formatCompactTo must be called before calling formatExpandedTo. formatExpandedTo([]byte, diffMode, indentMode) []byte } // textWrap is a wrapper that concatenates a prefix and/or a suffix // to the underlying node. type textWrap struct { Prefix string // e.g., "bytes.Buffer{" Value textNode // textWrap | textList | textLine Suffix string // e.g., "}" Metadata interface{} // arbitrary metadata; has no effect on formatting } func (s *textWrap) Len() int { return len(s.Prefix) + s.Value.Len() + len(s.Suffix) } func (s1 *textWrap) Equal(s2 textNode) bool { if s2, ok := s2.(*textWrap); ok { return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix } return false } func (s *textWrap) String() string { var d diffMode var n indentMode _, s2 := s.formatCompactTo(nil, d) b := n.appendIndent(nil, d) // Leading indent b = s2.formatExpandedTo(b, d, n) // Main body b = append(b, '\n') // Trailing newline return string(b) } func (s *textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { n0 := len(b) // Original buffer length b = append(b, s.Prefix...) b, s.Value = s.Value.formatCompactTo(b, d) b = append(b, s.Suffix...) if _, ok := s.Value.(textLine); ok { return b, textLine(b[n0:]) } return b, s } func (s *textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { b = append(b, s.Prefix...) b = s.Value.formatExpandedTo(b, d, n) b = append(b, s.Suffix...) return b } // textList is a comma-separated list of textWrap or textLine nodes. // The list may be formatted as multi-lines or single-line at the discretion // of the textList.formatCompactTo method. 
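//
// As a rough illustration (not an exact rendering of the report output): a
// short list of identical records such as {X: 1, Y: 2} may be collapsed onto
// a single line, while a record carrying a diff marker, a comment, or a value
// that itself could not be collapsed forces the whole list into its
// multi-line form.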
type textList []textRecord type textRecord struct { Diff diffMode // e.g., 0 or '-' or '+' Key string // e.g., "MyField" Value textNode // textWrap | textLine ElideComma bool // avoid trailing comma Comment fmt.Stringer // e.g., "6 identical fields" } // AppendEllipsis appends a new ellipsis node to the list if none already // exists at the end. If cs is non-zero it coalesces the statistics with the // previous diffStats. func (s *textList) AppendEllipsis(ds diffStats) { hasStats := !ds.IsZero() if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) { if hasStats { *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true, Comment: ds}) } else { *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true}) } return } if hasStats { (*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds) } } func (s textList) Len() (n int) { for i, r := range s { n += len(r.Key) if r.Key != "" { n += len(": ") } n += r.Value.Len() if i < len(s)-1 { n += len(", ") } } return n } func (s1 textList) Equal(s2 textNode) bool { if s2, ok := s2.(textList); ok { if len(s1) != len(s2) { return false } for i := range s1 { r1, r2 := s1[i], s2[i] if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) { return false } } return true } return false } func (s textList) String() string { return (&textWrap{Prefix: "{", Value: s, Suffix: "}"}).String() } func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { s = append(textList(nil), s...) // Avoid mutating original // Determine whether we can collapse this list as a single line. n0 := len(b) // Original buffer length var multiLine bool for i, r := range s { if r.Diff == diffInserted || r.Diff == diffRemoved { multiLine = true } b = append(b, r.Key...) if r.Key != "" { b = append(b, ": "...) } b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff) if _, ok := s[i].Value.(textLine); !ok { multiLine = true } if r.Comment != nil { multiLine = true } if i < len(s)-1 { b = append(b, ", "...) } } // Force multi-lined output when printing a removed/inserted node that // is sufficiently long. if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > maxColumnLength { multiLine = true } if !multiLine { return b, textLine(b[n0:]) } return b, s } func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { alignKeyLens := s.alignLens( func(r textRecord) bool { _, isLine := r.Value.(textLine) return r.Key == "" || !isLine }, func(r textRecord) int { return utf8.RuneCountInString(r.Key) }, ) alignValueLens := s.alignLens( func(r textRecord) bool { _, isLine := r.Value.(textLine) return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil }, func(r textRecord) int { return utf8.RuneCount(r.Value.(textLine)) }, ) // Format lists of simple lists in a batched form. // If the list is sequence of only textLine values, // then batch multiple values on a single line. var isSimple bool for _, r := range s { _, isLine := r.Value.(textLine) isSimple = r.Diff == 0 && r.Key == "" && isLine && r.Comment == nil if !isSimple { break } } if isSimple { n++ var batch []byte emitBatch := func() { if len(batch) > 0 { b = n.appendIndent(append(b, '\n'), d) b = append(b, bytes.TrimRight(batch, " ")...) batch = batch[:0] } } for _, r := range s { line := r.Value.(textLine) if len(batch)+len(line)+len(", ") > maxColumnLength { emitBatch() } batch = append(batch, line...) batch = append(batch, ", "...) 
} emitBatch() n-- return n.appendIndent(append(b, '\n'), d) } // Format the list as a multi-lined output. n++ for i, r := range s { b = n.appendIndent(append(b, '\n'), d|r.Diff) if r.Key != "" { b = append(b, r.Key+": "...) } b = alignKeyLens[i].appendChar(b, ' ') b = r.Value.formatExpandedTo(b, d|r.Diff, n) if !r.ElideComma { b = append(b, ',') } b = alignValueLens[i].appendChar(b, ' ') if r.Comment != nil { b = append(b, " // "+r.Comment.String()...) } } n-- return n.appendIndent(append(b, '\n'), d) } func (s textList) alignLens( skipFunc func(textRecord) bool, lenFunc func(textRecord) int, ) []repeatCount { var startIdx, endIdx, maxLen int lens := make([]repeatCount, len(s)) for i, r := range s { if skipFunc(r) { for j := startIdx; j < endIdx && j < len(s); j++ { lens[j] = repeatCount(maxLen - lenFunc(s[j])) } startIdx, endIdx, maxLen = i+1, i+1, 0 } else { if maxLen < lenFunc(r) { maxLen = lenFunc(r) } endIdx = i + 1 } } for j := startIdx; j < endIdx && j < len(s); j++ { lens[j] = repeatCount(maxLen - lenFunc(s[j])) } return lens } // textLine is a single-line segment of text and is always a leaf node // in the textNode tree. type textLine []byte var ( textNil = textLine("nil") textEllipsis = textLine("...") ) func (s textLine) Len() int { return len(s) } func (s1 textLine) Equal(s2 textNode) bool { if s2, ok := s2.(textLine); ok { return bytes.Equal([]byte(s1), []byte(s2)) } return false } func (s textLine) String() string { return string(s) } func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { return append(b, s...), s } func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte { return append(b, s...) } type diffStats struct { Name string NumIgnored int NumIdentical int NumRemoved int NumInserted int NumModified int } func (s diffStats) IsZero() bool { s.Name = "" return s == diffStats{} } func (s diffStats) NumDiff() int { return s.NumRemoved + s.NumInserted + s.NumModified } func (s diffStats) Append(ds diffStats) diffStats { assert(s.Name == ds.Name) s.NumIgnored += ds.NumIgnored s.NumIdentical += ds.NumIdentical s.NumRemoved += ds.NumRemoved s.NumInserted += ds.NumInserted s.NumModified += ds.NumModified return s } // String prints a humanly-readable summary of coalesced records. // // Example: // // diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields" func (s diffStats) String() string { var ss []string var sum int labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"} counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified} for i, n := range counts { if n > 0 { ss = append(ss, fmt.Sprintf("%d %v", n, labels[i])) } sum += n } // Pluralize the name (adjusting for some obscure English grammar rules). name := s.Name if sum > 1 { name += "s" if strings.HasSuffix(name, "ys") { name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries" } } // Format the list according to English grammar (with Oxford comma). switch n := len(ss); n { case 0: return "" case 1, 2: return strings.Join(ss, " and ") + " " + name default: return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name } } type commentString string func (s commentString) String() string { return string(s) } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/report_value.go0000644000000000000000000000705515024302467024737 0ustar rootroot// Copyright 2019, The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package cmp import "reflect" // valueNode represents a single node within a report, which is a // structured representation of the value tree, containing information // regarding which nodes are equal or not. type valueNode struct { parent *valueNode Type reflect.Type ValueX reflect.Value ValueY reflect.Value // NumSame is the number of leaf nodes that are equal. // All descendants are equal only if NumDiff is 0. NumSame int // NumDiff is the number of leaf nodes that are not equal. NumDiff int // NumIgnored is the number of leaf nodes that are ignored. NumIgnored int // NumCompared is the number of leaf nodes that were compared // using an Equal method or Comparer function. NumCompared int // NumTransformed is the number of non-leaf nodes that were transformed. NumTransformed int // NumChildren is the number of transitive descendants of this node. // This counts from zero; thus, leaf nodes have no descendants. NumChildren int // MaxDepth is the maximum depth of the tree. This counts from zero; // thus, leaf nodes have a depth of zero. MaxDepth int // Records is a list of struct fields, slice elements, or map entries. Records []reportRecord // If populated, implies Value is not populated // Value is the result of a transformation, pointer indirect, of // type assertion. Value *valueNode // If populated, implies Records is not populated // TransformerName is the name of the transformer. TransformerName string // If non-empty, implies Value is populated } type reportRecord struct { Key reflect.Value // Invalid for slice element Value *valueNode } func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) { vx, vy := ps.Values() child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy} switch s := ps.(type) { case StructField: assert(parent.Value == nil) parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child}) case SliceIndex: assert(parent.Value == nil) parent.Records = append(parent.Records, reportRecord{Value: child}) case MapIndex: assert(parent.Value == nil) parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child}) case Indirect: assert(parent.Value == nil && parent.Records == nil) parent.Value = child case TypeAssertion: assert(parent.Value == nil && parent.Records == nil) parent.Value = child case Transform: assert(parent.Value == nil && parent.Records == nil) parent.Value = child parent.TransformerName = s.Name() parent.NumTransformed++ default: assert(parent == nil) // Must be the root step } return child } func (r *valueNode) Report(rs Result) { assert(r.MaxDepth == 0) // May only be called on leaf nodes if rs.ByIgnore() { r.NumIgnored++ } else { if rs.Equal() { r.NumSame++ } else { r.NumDiff++ } } assert(r.NumSame+r.NumDiff+r.NumIgnored == 1) if rs.ByMethod() { r.NumCompared++ } if rs.ByFunc() { r.NumCompared++ } assert(r.NumCompared <= 1) } func (child *valueNode) PopStep() (parent *valueNode) { if child.parent == nil { return nil } parent = child.parent parent.NumSame += child.NumSame parent.NumDiff += child.NumDiff parent.NumIgnored += child.NumIgnored parent.NumCompared += child.NumCompared parent.NumTransformed += child.NumTransformed parent.NumChildren += child.NumChildren + 1 if parent.MaxDepth < child.MaxDepth+1 { parent.MaxDepth = child.MaxDepth + 1 } return parent } 
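// Illustrative sketch (not part of the vendored go-cmp sources): the valueNode
// tree above aggregates per-leaf statistics (NumSame, NumDiff, NumIgnored) as
// cmp walks the value graph via PushStep/Report/PopStep. The same protocol is
// exposed publicly through the cmp.Reporter option, so an external reporter can
// tally comparable counts. This is only a hedged sketch; the leafStats name and
// its fields are hypothetical, chosen for illustration.

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// leafStats loosely mirrors valueNode's counters for leaf comparisons.
type leafStats struct {
	same, diff, ignored int
}

// PushStep and PopStep are required by the Reporter interface; this sketch
// does not need to track the path, so they are no-ops.
func (s *leafStats) PushStep(ps cmp.PathStep) {}
func (s *leafStats) PopStep()                 {}

// Report is invoked once per leaf node, mirroring valueNode.Report above.
func (s *leafStats) Report(rs cmp.Result) {
	switch {
	case rs.ByIgnore():
		s.ignored++
	case rs.Equal():
		s.same++
	default:
		s.diff++
	}
}

func main() {
	type T struct{ A, B, C int }
	var s leafStats
	cmp.Equal(T{1, 2, 3}, T{1, 2, 4}, cmp.Reporter(&s))
	// Two struct fields compare equal and one differs, so this should print:
	// same=2 diff=1 ignored=0
	fmt.Printf("same=%d diff=%d ignored=%d\n", s.same, s.diff, s.ignored)
}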
dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/example_test.go0000644000000000000000000002650515024302467024723 0ustar rootroot// Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package cmp_test import ( "fmt" "math" "net" "reflect" "sort" "strings" "time" "github.com/google/go-cmp/cmp" ) // TODO: Re-write these examples in terms of how you actually use the // fundamental options and filters and not in terms of what cool things you can // do with them since that overlaps with cmp/cmpopts. // Use Diff to print out a human-readable report of differences for tests // comparing nested or structured data. func ExampleDiff_testing() { // Let got be the hypothetical value obtained from some logic under test // and want be the expected golden data. got, want := MakeGatewayInfo() if diff := cmp.Diff(want, got); diff != "" { t.Errorf("MakeGatewayInfo() mismatch (-want +got):\n%s", diff) } // Output: // MakeGatewayInfo() mismatch (-want +got): // cmp_test.Gateway{ // SSID: "CoffeeShopWiFi", // - IPAddress: s"192.168.0.2", // + IPAddress: s"192.168.0.1", // NetMask: s"ffff0000", // Clients: []cmp_test.Client{ // ... // 2 identical elements // {Hostname: "macchiato", IPAddress: s"192.168.0.153", LastSeen: s"2009-11-10 23:39:43 +0000 UTC"}, // {Hostname: "espresso", IPAddress: s"192.168.0.121"}, // { // Hostname: "latte", // - IPAddress: s"192.168.0.221", // + IPAddress: s"192.168.0.219", // LastSeen: s"2009-11-10 23:00:23 +0000 UTC", // }, // + { // + Hostname: "americano", // + IPAddress: s"192.168.0.188", // + LastSeen: s"2009-11-10 23:03:05 +0000 UTC", // + }, // }, // } } // Approximate equality for floats can be handled by defining a custom // comparer on floats that determines two values to be equal if they are within // some range of each other. // // This example is for demonstrative purposes; // use [github.com/google/go-cmp/cmp/cmpopts.EquateApprox] instead. func ExampleOption_approximateFloats() { // This Comparer only operates on float64. // To handle float32s, either define a similar function for that type // or use a Transformer to convert float32s into float64s. opt := cmp.Comparer(func(x, y float64) bool { delta := math.Abs(x - y) mean := math.Abs(x+y) / 2.0 return delta/mean < 0.00001 }) x := []float64{1.0, 1.1, 1.2, math.Pi} y := []float64{1.0, 1.1, 1.2, 3.14159265359} // Accurate enough to Pi z := []float64{1.0, 1.1, 1.2, 3.1415} // Diverges too far from Pi fmt.Println(cmp.Equal(x, y, opt)) fmt.Println(cmp.Equal(y, z, opt)) fmt.Println(cmp.Equal(z, x, opt)) // Output: // true // false // false } // Normal floating-point arithmetic defines == to be false when comparing // NaN with itself. In certain cases, this is not the desired property. // // This example is for demonstrative purposes; // use [github.com/google/go-cmp/cmp/cmpopts.EquateNaNs] instead. func ExampleOption_equalNaNs() { // This Comparer only operates on float64. // To handle float32s, either define a similar function for that type // or use a Transformer to convert float32s into float64s. 
opt := cmp.Comparer(func(x, y float64) bool { return (math.IsNaN(x) && math.IsNaN(y)) || x == y }) x := []float64{1.0, math.NaN(), math.E, 0.0} y := []float64{1.0, math.NaN(), math.E, 0.0} z := []float64{1.0, math.NaN(), math.Pi, 0.0} // Pi constant instead of E fmt.Println(cmp.Equal(x, y, opt)) fmt.Println(cmp.Equal(y, z, opt)) fmt.Println(cmp.Equal(z, x, opt)) // Output: // true // false // false } // To have floating-point comparisons combine both properties of NaN being // equal to itself and also approximate equality of values, filters are needed // to restrict the scope of the comparison so that they are composable. // // This example is for demonstrative purposes; // use [github.com/google/go-cmp/cmp/cmpopts.EquateApprox] instead. func ExampleOption_equalNaNsAndApproximateFloats() { alwaysEqual := cmp.Comparer(func(_, _ interface{}) bool { return true }) opts := cmp.Options{ // This option declares that a float64 comparison is equal only if // both inputs are NaN. cmp.FilterValues(func(x, y float64) bool { return math.IsNaN(x) && math.IsNaN(y) }, alwaysEqual), // This option declares approximate equality on float64s only if // both inputs are not NaN. cmp.FilterValues(func(x, y float64) bool { return !math.IsNaN(x) && !math.IsNaN(y) }, cmp.Comparer(func(x, y float64) bool { delta := math.Abs(x - y) mean := math.Abs(x+y) / 2.0 return delta/mean < 0.00001 })), } x := []float64{math.NaN(), 1.0, 1.1, 1.2, math.Pi} y := []float64{math.NaN(), 1.0, 1.1, 1.2, 3.14159265359} // Accurate enough to Pi z := []float64{math.NaN(), 1.0, 1.1, 1.2, 3.1415} // Diverges too far from Pi fmt.Println(cmp.Equal(x, y, opts)) fmt.Println(cmp.Equal(y, z, opts)) fmt.Println(cmp.Equal(z, x, opts)) // Output: // true // false // false } // Sometimes, an empty map or slice is considered equal to an allocated one // of zero length. // // This example is for demonstrative purposes; // use [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty] instead. func ExampleOption_equalEmpty() { alwaysEqual := cmp.Comparer(func(_, _ interface{}) bool { return true }) // This option handles slices and maps of any type. opt := cmp.FilterValues(func(x, y interface{}) bool { vx, vy := reflect.ValueOf(x), reflect.ValueOf(y) return (vx.IsValid() && vy.IsValid() && vx.Type() == vy.Type()) && (vx.Kind() == reflect.Slice || vx.Kind() == reflect.Map) && (vx.Len() == 0 && vy.Len() == 0) }, alwaysEqual) type S struct { A []int B map[string]bool } x := S{nil, make(map[string]bool, 100)} y := S{make([]int, 0, 200), nil} z := S{[]int{0}, nil} // []int has a single element (i.e., not empty) fmt.Println(cmp.Equal(x, y, opt)) fmt.Println(cmp.Equal(y, z, opt)) fmt.Println(cmp.Equal(z, x, opt)) // Output: // true // false // false } // Two slices may be considered equal if they have the same elements, // regardless of the order that they appear in. Transformations can be used // to sort the slice. // // This example is for demonstrative purposes; // use [github.com/google/go-cmp/cmp/cmpopts.SortSlices] instead. func ExampleOption_sortedSlice() { // This Transformer sorts a []int. trans := cmp.Transformer("Sort", func(in []int) []int { out := append([]int(nil), in...) 
// Copy input to avoid mutating it sort.Ints(out) return out }) x := struct{ Ints []int }{[]int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}} y := struct{ Ints []int }{[]int{2, 8, 0, 9, 6, 1, 4, 7, 3, 5}} z := struct{ Ints []int }{[]int{0, 0, 1, 2, 3, 4, 5, 6, 7, 8}} fmt.Println(cmp.Equal(x, y, trans)) fmt.Println(cmp.Equal(y, z, trans)) fmt.Println(cmp.Equal(z, x, trans)) // Output: // true // false // false } type otherString string func (x otherString) Equal(y otherString) bool { return strings.EqualFold(string(x), string(y)) } // If the Equal method defined on a type is not suitable, the type can be // dynamically transformed to be stripped of the Equal method (or any method // for that matter). func ExampleOption_avoidEqualMethod() { // Suppose otherString.Equal performs a case-insensitive equality, // which is too loose for our needs. // We can avoid the methods of otherString by declaring a new type. type myString otherString // This transformer converts otherString to myString, allowing Equal to use // other Options to determine equality. trans := cmp.Transformer("", func(in otherString) myString { return myString(in) }) x := []otherString{"foo", "bar", "baz"} y := []otherString{"fOO", "bAr", "Baz"} // Same as before, but with different case fmt.Println(cmp.Equal(x, y)) // Equal because of case-insensitivity fmt.Println(cmp.Equal(x, y, trans)) // Not equal because of more exact equality // Output: // true // false } func roundF64(z float64) float64 { if z < 0 { return math.Ceil(z - 0.5) } return math.Floor(z + 0.5) } // The complex numbers complex64 and complex128 can really just be decomposed // into a pair of float32 or float64 values. It would be convenient to be able // define only a single comparator on float64 and have float32, complex64, and // complex128 all be able to use that comparator. Transformations can be used // to handle this. func ExampleOption_transformComplex() { opts := []cmp.Option{ // This transformer decomposes complex128 into a pair of float64s. cmp.Transformer("T1", func(in complex128) (out struct{ Real, Imag float64 }) { out.Real, out.Imag = real(in), imag(in) return out }), // This transformer converts complex64 to complex128 to allow the // above transform to take effect. cmp.Transformer("T2", func(in complex64) complex128 { return complex128(in) }), // This transformer converts float32 to float64. cmp.Transformer("T3", func(in float32) float64 { return float64(in) }), // This equality function compares float64s as rounded integers. 
cmp.Comparer(func(x, y float64) bool { return roundF64(x) == roundF64(y) }), } x := []interface{}{ complex128(3.0), complex64(5.1 + 2.9i), float32(-1.2), float64(12.3), } y := []interface{}{ complex128(3.1), complex64(4.9 + 3.1i), float32(-1.3), float64(11.7), } z := []interface{}{ complex128(3.8), complex64(4.9 + 3.1i), float32(-1.3), float64(11.7), } fmt.Println(cmp.Equal(x, y, opts...)) fmt.Println(cmp.Equal(y, z, opts...)) fmt.Println(cmp.Equal(z, x, opts...)) // Output: // true // false // false } type ( Gateway struct { SSID string IPAddress net.IP NetMask net.IPMask Clients []Client } Client struct { Hostname string IPAddress net.IP LastSeen time.Time } ) func MakeGatewayInfo() (x, y Gateway) { x = Gateway{ SSID: "CoffeeShopWiFi", IPAddress: net.IPv4(192, 168, 0, 1), NetMask: net.IPv4Mask(255, 255, 0, 0), Clients: []Client{{ Hostname: "ristretto", IPAddress: net.IPv4(192, 168, 0, 116), }, { Hostname: "arabica", IPAddress: net.IPv4(192, 168, 0, 104), LastSeen: time.Date(2009, time.November, 10, 23, 6, 32, 0, time.UTC), }, { Hostname: "macchiato", IPAddress: net.IPv4(192, 168, 0, 153), LastSeen: time.Date(2009, time.November, 10, 23, 39, 43, 0, time.UTC), }, { Hostname: "espresso", IPAddress: net.IPv4(192, 168, 0, 121), }, { Hostname: "latte", IPAddress: net.IPv4(192, 168, 0, 219), LastSeen: time.Date(2009, time.November, 10, 23, 0, 23, 0, time.UTC), }, { Hostname: "americano", IPAddress: net.IPv4(192, 168, 0, 188), LastSeen: time.Date(2009, time.November, 10, 23, 3, 5, 0, time.UTC), }}, } y = Gateway{ SSID: "CoffeeShopWiFi", IPAddress: net.IPv4(192, 168, 0, 2), NetMask: net.IPv4Mask(255, 255, 0, 0), Clients: []Client{{ Hostname: "ristretto", IPAddress: net.IPv4(192, 168, 0, 116), }, { Hostname: "arabica", IPAddress: net.IPv4(192, 168, 0, 104), LastSeen: time.Date(2009, time.November, 10, 23, 6, 32, 0, time.UTC), }, { Hostname: "macchiato", IPAddress: net.IPv4(192, 168, 0, 153), LastSeen: time.Date(2009, time.November, 10, 23, 39, 43, 0, time.UTC), }, { Hostname: "espresso", IPAddress: net.IPv4(192, 168, 0, 121), }, { Hostname: "latte", IPAddress: net.IPv4(192, 168, 0, 221), LastSeen: time.Date(2009, time.November, 10, 23, 0, 23, 0, time.UTC), }}, } return x, y } var t fakeT type fakeT struct{} func (t fakeT) Errorf(format string, args ...interface{}) { fmt.Printf(format+"\n", args...) } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/example_reporter_test.go0000644000000000000000000000244315024302467026640 0ustar rootroot// Copyright 2019, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package cmp_test import ( "fmt" "strings" "github.com/google/go-cmp/cmp" ) // DiffReporter is a simple custom reporter that only records differences // detected during comparison. 
type DiffReporter struct { path cmp.Path diffs []string } func (r *DiffReporter) PushStep(ps cmp.PathStep) { r.path = append(r.path, ps) } func (r *DiffReporter) Report(rs cmp.Result) { if !rs.Equal() { vx, vy := r.path.Last().Values() r.diffs = append(r.diffs, fmt.Sprintf("%#v:\n\t-: %+v\n\t+: %+v\n", r.path, vx, vy)) } } func (r *DiffReporter) PopStep() { r.path = r.path[:len(r.path)-1] } func (r *DiffReporter) String() string { return strings.Join(r.diffs, "\n") } func ExampleReporter() { x, y := MakeGatewayInfo() var r DiffReporter cmp.Equal(x, y, cmp.Reporter(&r)) fmt.Print(r.String()) // Output: // {cmp_test.Gateway}.IPAddress: // -: 192.168.0.1 // +: 192.168.0.2 // // {cmp_test.Gateway}.Clients[4].IPAddress: // -: 192.168.0.219 // +: 192.168.0.221 // // {cmp_test.Gateway}.Clients[5->?]: // -: {Hostname:americano IPAddress:192.168.0.188 LastSeen:2009-11-10 23:03:05 +0000 UTC} // +: } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/report_slices.go0000644000000000000000000004763515024302467025115 0ustar rootroot// Copyright 2019, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package cmp import ( "bytes" "fmt" "math" "reflect" "strconv" "strings" "unicode" "unicode/utf8" "github.com/google/go-cmp/cmp/internal/diff" ) // CanFormatDiffSlice reports whether we support custom formatting for nodes // that are slices of primitive kinds or strings. func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { switch { case opts.DiffMode != diffUnknown: return false // Must be formatting in diff mode case v.NumDiff == 0: return false // No differences detected case !v.ValueX.IsValid() || !v.ValueY.IsValid(): return false // Both values must be valid case v.NumIgnored > 0: return false // Some ignore option was used case v.NumTransformed > 0: return false // Some transform option was used case v.NumCompared > 1: return false // More than one comparison was used case v.NumCompared == 1 && v.Type.Name() != "": // The need for cmp to check applicability of options on every element // in a slice is a significant performance detriment for large []byte. // The workaround is to specify Comparer(bytes.Equal), // which enables cmp to compare []byte more efficiently. // If they differ, we still want to provide batched diffing. // The logic disallows named types since they tend to have their own // String method, with nicer formatting than what this provides. return false } // Check whether this is an interface with the same concrete types. t := v.Type vx, vy := v.ValueX, v.ValueY if t.Kind() == reflect.Interface && !vx.IsNil() && !vy.IsNil() && vx.Elem().Type() == vy.Elem().Type() { vx, vy = vx.Elem(), vy.Elem() t = vx.Type() } // Check whether we provide specialized diffing for this type. switch t.Kind() { case reflect.String: case reflect.Array, reflect.Slice: // Only slices of primitive types have specialized handling. switch t.Elem().Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: default: return false } // Both slice values have to be non-empty. if t.Kind() == reflect.Slice && (vx.Len() == 0 || vy.Len() == 0) { return false } // If a sufficient number of elements already differ, // use specialized formatting even if length requirement is not met. 
if v.NumDiff > v.NumSame { return true } default: return false } // Use specialized string diffing for longer slices or strings. const minLength = 32 return vx.Len() >= minLength && vy.Len() >= minLength } // FormatDiffSlice prints a diff for the slices (or strings) represented by v. // This provides custom-tailored logic to make printing of differences in // textual strings and slices of primitive kinds more readable. func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { assert(opts.DiffMode == diffUnknown) t, vx, vy := v.Type, v.ValueX, v.ValueY if t.Kind() == reflect.Interface { vx, vy = vx.Elem(), vy.Elem() t = vx.Type() opts = opts.WithTypeMode(emitType) } // Auto-detect the type of the data. var sx, sy string var ssx, ssy []string var isString, isMostlyText, isPureLinedText, isBinary bool switch { case t.Kind() == reflect.String: sx, sy = vx.String(), vy.String() isString = true case t.Kind() == reflect.Slice && t.Elem() == byteType: sx, sy = string(vx.Bytes()), string(vy.Bytes()) isString = true case t.Kind() == reflect.Array: // Arrays need to be addressable for slice operations to work. vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem() vx2.Set(vx) vy2.Set(vy) vx, vy = vx2, vy2 } if isString { var numTotalRunes, numValidRunes, numLines, lastLineIdx, maxLineLen int for i, r := range sx + sy { numTotalRunes++ if (unicode.IsPrint(r) || unicode.IsSpace(r)) && r != utf8.RuneError { numValidRunes++ } if r == '\n' { if maxLineLen < i-lastLineIdx { maxLineLen = i - lastLineIdx } lastLineIdx = i + 1 numLines++ } } isPureText := numValidRunes == numTotalRunes isMostlyText = float64(numValidRunes) > math.Floor(0.90*float64(numTotalRunes)) isPureLinedText = isPureText && numLines >= 4 && maxLineLen <= 1024 isBinary = !isMostlyText // Avoid diffing by lines if it produces a significantly more complex // edit script than diffing by bytes. if isPureLinedText { ssx = strings.Split(sx, "\n") ssy = strings.Split(sy, "\n") esLines := diff.Difference(len(ssx), len(ssy), func(ix, iy int) diff.Result { return diff.BoolResult(ssx[ix] == ssy[iy]) }) esBytes := diff.Difference(len(sx), len(sy), func(ix, iy int) diff.Result { return diff.BoolResult(sx[ix] == sy[iy]) }) efficiencyLines := float64(esLines.Dist()) / float64(len(esLines)) efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes)) quotedLength := len(strconv.Quote(sx + sy)) unquotedLength := len(sx) + len(sy) escapeExpansionRatio := float64(quotedLength) / float64(unquotedLength) isPureLinedText = efficiencyLines < 4*efficiencyBytes || escapeExpansionRatio > 1.1 } } // Format the string into printable records. var list textList var delim string switch { // If the text appears to be multi-lined text, // then perform differencing across individual lines. case isPureLinedText: list = opts.formatDiffSlice( reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line", func(v reflect.Value, d diffMode) textRecord { s := formatString(v.Index(0).String()) return textRecord{Diff: d, Value: textLine(s)} }, ) delim = "\n" // If possible, use a custom triple-quote (""") syntax for printing // differences in a string literal. This format is more readable, // but has edge-cases where differences are visually indistinguishable. // This format is avoided under the following conditions: // - A line starts with `"""` // - A line starts with "..." // - A line contains non-printable characters // - Adjacent different lines differ only by whitespace // // For example: // // """ // ... 
// 3 identical lines // foo // bar // - baz // + BAZ // """ isTripleQuoted := true prevRemoveLines := map[string]bool{} prevInsertLines := map[string]bool{} var list2 textList list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true}) for _, r := range list { if !r.Value.Equal(textEllipsis) { line, _ := strconv.Unquote(string(r.Value.(textLine))) line = strings.TrimPrefix(strings.TrimSuffix(line, "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support normLine := strings.Map(func(r rune) rune { if unicode.IsSpace(r) { return -1 // drop whitespace to avoid visually indistinguishable output } return r }, line) isPrintable := func(r rune) bool { return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable } isTripleQuoted = !strings.HasPrefix(line, `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" switch r.Diff { case diffRemoved: isTripleQuoted = isTripleQuoted && !prevInsertLines[normLine] prevRemoveLines[normLine] = true case diffInserted: isTripleQuoted = isTripleQuoted && !prevRemoveLines[normLine] prevInsertLines[normLine] = true } if !isTripleQuoted { break } r.Value = textLine(line) r.ElideComma = true } if !(r.Diff == diffRemoved || r.Diff == diffInserted) { // start a new non-adjacent difference group prevRemoveLines = map[string]bool{} prevInsertLines = map[string]bool{} } list2 = append(list2, r) } if r := list2[len(list2)-1]; r.Diff == diffIdentical && len(r.Value.(textLine)) == 0 { list2 = list2[:len(list2)-1] // elide single empty line at the end } list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true}) if isTripleQuoted { var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"} switch t.Kind() { case reflect.String: if t != stringType { out = opts.FormatType(t, out) } case reflect.Slice: // Always emit type for slices since the triple-quote syntax // looks like a string (not a slice). opts = opts.WithTypeMode(emitType) out = opts.FormatType(t, out) } return out } // If the text appears to be single-lined text, // then perform differencing in approximately fixed-sized chunks. // The output is printed as quoted strings. case isMostlyText: list = opts.formatDiffSlice( reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte", func(v reflect.Value, d diffMode) textRecord { s := formatString(v.String()) return textRecord{Diff: d, Value: textLine(s)} }, ) // If the text appears to be binary data, // then perform differencing in approximately fixed-sized chunks. // The output is inspired by hexdump. case isBinary: list = opts.formatDiffSlice( reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte", func(v reflect.Value, d diffMode) textRecord { var ss []string for i := 0; i < v.Len(); i++ { ss = append(ss, formatHex(v.Index(i).Uint())) } s := strings.Join(ss, ", ") comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String()))) return textRecord{Diff: d, Value: textLine(s), Comment: comment} }, ) // For all other slices of primitive types, // then perform differencing in approximately fixed-sized chunks. // The size of each chunk depends on the width of the element kind. 
default: var chunkSize int if t.Elem().Kind() == reflect.Bool { chunkSize = 16 } else { switch t.Elem().Bits() { case 8: chunkSize = 16 case 16: chunkSize = 12 case 32: chunkSize = 8 default: chunkSize = 8 } } list = opts.formatDiffSlice( vx, vy, chunkSize, t.Elem().Kind().String(), func(v reflect.Value, d diffMode) textRecord { var ss []string for i := 0; i < v.Len(); i++ { switch t.Elem().Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: ss = append(ss, fmt.Sprint(v.Index(i).Int())) case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: ss = append(ss, fmt.Sprint(v.Index(i).Uint())) case reflect.Uint8, reflect.Uintptr: ss = append(ss, formatHex(v.Index(i).Uint())) case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: ss = append(ss, fmt.Sprint(v.Index(i).Interface())) } } s := strings.Join(ss, ", ") return textRecord{Diff: d, Value: textLine(s)} }, ) } // Wrap the output with appropriate type information. var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"} if !isMostlyText { // The "{...}" byte-sequence literal is not valid Go syntax for strings. // Emit the type for extra clarity (e.g. "string{...}"). if t.Kind() == reflect.String { opts = opts.WithTypeMode(emitType) } return opts.FormatType(t, out) } switch t.Kind() { case reflect.String: out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} if t != stringType { out = opts.FormatType(t, out) } case reflect.Slice: out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} if t != bytesType { out = opts.FormatType(t, out) } } return out } // formatASCII formats s as an ASCII string. // This is useful for printing binary strings in a semi-legible way. func formatASCII(s string) string { b := bytes.Repeat([]byte{'.'}, len(s)) for i := 0; i < len(s); i++ { if ' ' <= s[i] && s[i] <= '~' { b[i] = s[i] } } return string(b) } func (opts formatOptions) formatDiffSlice( vx, vy reflect.Value, chunkSize int, name string, makeRec func(reflect.Value, diffMode) textRecord, ) (list textList) { eq := func(ix, iy int) bool { return vx.Index(ix).Interface() == vy.Index(iy).Interface() } es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result { return diff.BoolResult(eq(ix, iy)) }) appendChunks := func(v reflect.Value, d diffMode) int { n0 := v.Len() for v.Len() > 0 { n := chunkSize if n > v.Len() { n = v.Len() } list = append(list, makeRec(v.Slice(0, n), d)) v = v.Slice(n, v.Len()) } return n0 - v.Len() } var numDiffs int maxLen := -1 if opts.LimitVerbosity { maxLen = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc... opts.VerbosityLevel-- } groups := coalesceAdjacentEdits(name, es) groups = coalesceInterveningIdentical(groups, chunkSize/4) groups = cleanupSurroundingIdentical(groups, eq) maxGroup := diffStats{Name: name} for i, ds := range groups { if maxLen >= 0 && numDiffs >= maxLen { maxGroup = maxGroup.Append(ds) continue } // Print equal. if ds.NumDiff() == 0 { // Compute the number of leading and trailing equal bytes to print. var numLo, numHi int numEqual := ds.NumIgnored + ds.NumIdentical for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 { numLo++ } for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { numHi++ } if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 { numHi = numEqual - numLo // Avoid pointless coalescing of single equal row } // Print the equal bytes. 
appendChunks(vx.Slice(0, numLo), diffIdentical) if numEqual > numLo+numHi { ds.NumIdentical -= numLo + numHi list.AppendEllipsis(ds) } appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical) vx = vx.Slice(numEqual, vx.Len()) vy = vy.Slice(numEqual, vy.Len()) continue } // Print unequal. len0 := len(list) nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved) vx = vx.Slice(nx, vx.Len()) ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted) vy = vy.Slice(ny, vy.Len()) numDiffs += len(list) - len0 } if maxGroup.IsZero() { assert(vx.Len() == 0 && vy.Len() == 0) } else { list.AppendEllipsis(maxGroup) } return list } // coalesceAdjacentEdits coalesces the list of edits into groups of adjacent // equal or unequal counts. // // Example: // // Input: "..XXY...Y" // Output: [ // {NumIdentical: 2}, // {NumRemoved: 2, NumInserted 1}, // {NumIdentical: 3}, // {NumInserted: 1}, // ] func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { var prevMode byte lastStats := func(mode byte) *diffStats { if prevMode != mode { groups = append(groups, diffStats{Name: name}) prevMode = mode } return &groups[len(groups)-1] } for _, e := range es { switch e { case diff.Identity: lastStats('=').NumIdentical++ case diff.UniqueX: lastStats('!').NumRemoved++ case diff.UniqueY: lastStats('!').NumInserted++ case diff.Modified: lastStats('!').NumModified++ } } return groups } // coalesceInterveningIdentical coalesces sufficiently short (<= windowSize) // equal groups into adjacent unequal groups that currently result in a // dual inserted/removed printout. This acts as a high-pass filter to smooth // out high-frequency changes within the windowSize. // // Example: // // WindowSize: 16, // Input: [ // {NumIdentical: 61}, // group 0 // {NumRemoved: 3, NumInserted: 1}, // group 1 // {NumIdentical: 6}, // ├── coalesce // {NumInserted: 2}, // ├── coalesce // {NumIdentical: 1}, // ├── coalesce // {NumRemoved: 9}, // └── coalesce // {NumIdentical: 64}, // group 2 // {NumRemoved: 3, NumInserted: 1}, // group 3 // {NumIdentical: 6}, // ├── coalesce // {NumInserted: 2}, // ├── coalesce // {NumIdentical: 1}, // ├── coalesce // {NumRemoved: 7}, // ├── coalesce // {NumIdentical: 1}, // ├── coalesce // {NumRemoved: 2}, // └── coalesce // {NumIdentical: 63}, // group 4 // ] // Output: [ // {NumIdentical: 61}, // {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, // {NumIdentical: 64}, // {NumIdentical: 8, NumRemoved: 12, NumInserted: 3}, // {NumIdentical: 63}, // ] func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats { groups, groupsOrig := groups[:0], groups for i, ds := range groupsOrig { if len(groups) >= 2 && ds.NumDiff() > 0 { prev := &groups[len(groups)-2] // Unequal group curr := &groups[len(groups)-1] // Equal group next := &groupsOrig[i] // Unequal group hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0 hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0 if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize { *prev = prev.Append(*curr).Append(*next) groups = groups[:len(groups)-1] // Truncate off equal group continue } } groups = append(groups, ds) } return groups } // cleanupSurroundingIdentical scans through all unequal groups, and // moves any leading sequence of equal elements to the preceding equal group and // moves and trailing sequence of equal elements to the succeeding equal group. 
// // This is necessary since coalesceInterveningIdentical may coalesce edit groups // together such that leading/trailing spans of equal elements becomes possible. // Note that this can occur even with an optimal diffing algorithm. // // Example: // // Input: [ // {NumIdentical: 61}, // {NumIdentical: 1 , NumRemoved: 11, NumInserted: 2}, // assume 3 leading identical elements // {NumIdentical: 67}, // {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, // assume 10 trailing identical elements // {NumIdentical: 54}, // ] // Output: [ // {NumIdentical: 64}, // incremented by 3 // {NumRemoved: 9}, // {NumIdentical: 67}, // {NumRemoved: 9}, // {NumIdentical: 64}, // incremented by 10 // ] func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats { var ix, iy int // indexes into sequence x and y for i, ds := range groups { // Handle equal group. if ds.NumDiff() == 0 { ix += ds.NumIdentical iy += ds.NumIdentical continue } // Handle unequal group. nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified ny := ds.NumIdentical + ds.NumInserted + ds.NumModified var numLeadingIdentical, numTrailingIdentical int for j := 0; j < nx && j < ny && eq(ix+j, iy+j); j++ { numLeadingIdentical++ } for j := 0; j < nx && j < ny && eq(ix+nx-1-j, iy+ny-1-j); j++ { numTrailingIdentical++ } if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 { if numLeadingIdentical > 0 { // Remove leading identical span from this group and // insert it into the preceding group. if i-1 >= 0 { groups[i-1].NumIdentical += numLeadingIdentical } else { // No preceding group exists, so prepend a new group, // but do so after we finish iterating over all groups. defer func() { groups = append([]diffStats{{Name: groups[0].Name, NumIdentical: numLeadingIdentical}}, groups...) }() } // Increment indexes since the preceding group would have handled this. ix += numLeadingIdentical iy += numLeadingIdentical } if numTrailingIdentical > 0 { // Remove trailing identical span from this group and // insert it into the succeeding group. if i+1 < len(groups) { groups[i+1].NumIdentical += numTrailingIdentical } else { // No succeeding group exists, so append a new group, // but do so after we finish iterating over all groups. defer func() { groups = append(groups, diffStats{Name: groups[len(groups)-1].Name, NumIdentical: numTrailingIdentical}) }() } // Do not increment indexes since the succeeding group will handle this. } // Update this group since some identical elements were removed. 
nx -= numIdentical ny -= numIdentical groups[i] = diffStats{Name: ds.Name, NumRemoved: nx, NumInserted: ny} } ix += nx iy += ny } return groups } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/testdata/0000755000000000000000000000000015024302467023503 5ustar rootrootdependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/testdata/diffs0000644000000000000000000014761015024302467024532 0ustar rootroot<<< TestDiff/Comparer/StructInequal struct{ A int; B int; C int }{ A: 1, B: 2, - C: 3, + C: 4, } >>> TestDiff/Comparer/StructInequal <<< TestDiff/Comparer/PointerStructInequal &struct{ A *int }{ - A: &4, + A: &5, } >>> TestDiff/Comparer/PointerStructInequal <<< TestDiff/Comparer/StructNestedPointerInequal &struct{ R *bytes.Buffer }{ - R: s"", + R: nil, } >>> TestDiff/Comparer/StructNestedPointerInequal <<< TestDiff/Comparer/RegexpInequal []*regexp.Regexp{ nil, - s"a*b*c*", + s"a*b*d*", } >>> TestDiff/Comparer/RegexpInequal <<< TestDiff/Comparer/TriplePointerInequal &&&int( - 0, + 1, ) >>> TestDiff/Comparer/TriplePointerInequal <<< TestDiff/Comparer/StringerInequal struct{ fmt.Stringer }( - s"hello", + s"hello2", ) >>> TestDiff/Comparer/StringerInequal <<< TestDiff/Comparer/DifferingHash [32]uint8{ - 0xca, 0x97, 0x81, 0x12, 0xca, 0x1b, 0xbd, 0xca, 0xfa, 0xc2, 0x31, 0xb3, 0x9a, 0x23, 0xdc, 0x4d, - 0xa7, 0x86, 0xef, 0xf8, 0x14, 0x7c, 0x4e, 0x72, 0xb9, 0x80, 0x77, 0x85, 0xaf, 0xee, 0x48, 0xbb, + 0x3e, 0x23, 0xe8, 0x16, 0x00, 0x39, 0x59, 0x4a, 0x33, 0x89, 0x4f, 0x65, 0x64, 0xe1, 0xb1, 0x34, + 0x8b, 0xbd, 0x7a, 0x00, 0x88, 0xd4, 0x2c, 0x4a, 0xcb, 0x73, 0xee, 0xae, 0xd5, 0x9c, 0x00, 0x9d, } >>> TestDiff/Comparer/DifferingHash <<< TestDiff/Comparer/NilStringer any( - &fmt.Stringer(nil), ) >>> TestDiff/Comparer/NilStringer <<< TestDiff/Comparer/TarHeaders []cmp_test.tarHeader{ { ... // 4 identical fields Size: 1, ModTime: s"2009-11-10 23:00:00 +0000 UTC", - Typeflag: 48, + Typeflag: 0, Linkname: "", Uname: "user", ... // 6 identical fields }, { ... // 4 identical fields Size: 2, ModTime: s"2009-11-11 00:00:00 +0000 UTC", - Typeflag: 48, + Typeflag: 0, Linkname: "", Uname: "user", ... // 6 identical fields }, { ... // 4 identical fields Size: 4, ModTime: s"2009-11-11 01:00:00 +0000 UTC", - Typeflag: 48, + Typeflag: 0, Linkname: "", Uname: "user", ... // 6 identical fields }, { ... // 4 identical fields Size: 8, ModTime: s"2009-11-11 02:00:00 +0000 UTC", - Typeflag: 48, + Typeflag: 0, Linkname: "", Uname: "user", ... // 6 identical fields }, { ... // 4 identical fields Size: 16, ModTime: s"2009-11-11 03:00:00 +0000 UTC", - Typeflag: 48, + Typeflag: 0, Linkname: "", Uname: "user", ... 
// 6 identical fields }, } >>> TestDiff/Comparer/TarHeaders <<< TestDiff/Comparer/IrreflexiveComparison []int{ - Inverse(λ, float64(NaN)), + Inverse(λ, float64(NaN)), - Inverse(λ, float64(NaN)), + Inverse(λ, float64(NaN)), - Inverse(λ, float64(NaN)), + Inverse(λ, float64(NaN)), - Inverse(λ, float64(NaN)), + Inverse(λ, float64(NaN)), - Inverse(λ, float64(NaN)), + Inverse(λ, float64(NaN)), - Inverse(λ, float64(NaN)), + Inverse(λ, float64(NaN)), - Inverse(λ, float64(NaN)), + Inverse(λ, float64(NaN)), - Inverse(λ, float64(NaN)), + Inverse(λ, float64(NaN)), - Inverse(λ, float64(NaN)), + Inverse(λ, float64(NaN)), - Inverse(λ, float64(NaN)), + Inverse(λ, float64(NaN)), } >>> TestDiff/Comparer/IrreflexiveComparison <<< TestDiff/Comparer/StringerMapKey map[*testprotos.Stringer]*testprotos.Stringer( - {s"hello": s"world"}, + nil, ) >>> TestDiff/Comparer/StringerMapKey <<< TestDiff/Comparer/StringerBacktick any( - []*testprotos.Stringer{s`multi\nline\nline\nline`}, ) >>> TestDiff/Comparer/StringerBacktick <<< TestDiff/Comparer/DynamicMap []any{ map[string]any{ "avg": float64(0.278), - "hr": int(65), + "hr": float64(65), "name": string("Mark McGwire"), }, map[string]any{ "avg": float64(0.288), - "hr": int(63), + "hr": float64(63), "name": string("Sammy Sosa"), }, } >>> TestDiff/Comparer/DynamicMap <<< TestDiff/Comparer/MapKeyPointer map[*int]string{ - &⟪0xdeadf00f⟫0: "hello", + &⟪0xdeadf00f⟫0: "world", } >>> TestDiff/Comparer/MapKeyPointer <<< TestDiff/Comparer/IgnoreSliceElements [2][]int{ {..., 1, 2, 3, ...}, { ... // 6 ignored and 1 identical elements - 20, + 2, ... // 3 ignored elements }, } >>> TestDiff/Comparer/IgnoreSliceElements <<< TestDiff/Comparer/IgnoreMapEntries [2]map[string]int{ {"KEEP3": 3, "keep1": 1, "keep2": 2, ...}, { ... // 2 ignored entries "keep1": 1, + "keep2": 2, }, } >>> TestDiff/Comparer/IgnoreMapEntries <<< TestDiff/Transformer/Uints uint8(Inverse(λ, uint16(Inverse(λ, uint32(Inverse(λ, uint64( - 0, + 1, ))))))) >>> TestDiff/Transformer/Uints <<< TestDiff/Transformer/Filtered []int{ Inverse(λ, int64(0)), - Inverse(λ, int64(-5)), + Inverse(λ, int64(3)), Inverse(λ, int64(0)), - Inverse(λ, int64(-1)), + Inverse(λ, int64(-5)), } >>> TestDiff/Transformer/Filtered <<< TestDiff/Transformer/DisjointOutput int(Inverse(λ, any( - string("zero"), + float64(1), ))) >>> TestDiff/Transformer/DisjointOutput <<< TestDiff/Transformer/JSON string(Inverse(ParseJSON, map[string]any{ "address": map[string]any{ - "city": string("Los Angeles"), + "city": string("New York"), "postalCode": string("10021-3100"), - "state": string("CA"), + "state": string("NY"), "streetAddress": string("21 2nd Street"), }, "age": float64(25), "children": []any{}, "firstName": string("John"), "isAlive": bool(true), "lastName": string("Smith"), "phoneNumbers": []any{ map[string]any{ - "number": string("212 555-4321"), + "number": string("212 555-1234"), "type": string("home"), }, map[string]any{"number": string("646 555-4567"), "type": string("office")}, map[string]any{"number": string("123 456-7890"), "type": string("mobile")}, }, + "spouse": nil, })) >>> TestDiff/Transformer/JSON <<< TestDiff/Transformer/AcyclicString cmp_test.StringBytes{ String: Inverse(SplitString, []string{ "some", "multi", - "Line", + "line", "string", }), Bytes: []uint8(Inverse(SplitBytes, [][]uint8{ "some", "multi", "line", { - 0x62, + 0x42, 0x79, 0x74, ... 
// 2 identical elements }, })), } >>> TestDiff/Transformer/AcyclicString <<< TestDiff/Reporter/PanicStringer struct{ X fmt.Stringer }{ - X: struct{ fmt.Stringer }{}, + X: s"", } >>> TestDiff/Reporter/PanicStringer <<< TestDiff/Reporter/PanicError struct{ X error }{ - X: struct{ error }{}, + X: e"", } >>> TestDiff/Reporter/PanicError <<< TestDiff/Reporter/AmbiguousType any( - "github.com/google/go-cmp/cmp/internal/teststructs/foo1".Bar{}, + "github.com/google/go-cmp/cmp/internal/teststructs/foo2".Bar{}, ) >>> TestDiff/Reporter/AmbiguousType <<< TestDiff/Reporter/AmbiguousPointer (*int)( - &⟪0xdeadf00f⟫0, + &⟪0xdeadf00f⟫0, ) >>> TestDiff/Reporter/AmbiguousPointer <<< TestDiff/Reporter/AmbiguousPointerStruct struct{ I *int }{ - I: &⟪0xdeadf00f⟫0, + I: &⟪0xdeadf00f⟫0, } >>> TestDiff/Reporter/AmbiguousPointerStruct <<< TestDiff/Reporter/AmbiguousPointerSlice []*int{ - &⟪0xdeadf00f⟫0, + &⟪0xdeadf00f⟫0, } >>> TestDiff/Reporter/AmbiguousPointerSlice <<< TestDiff/Reporter/AmbiguousPointerMap map[string]*int{ - "zero": &⟪0xdeadf00f⟫0, + "zero": &⟪0xdeadf00f⟫0, } >>> TestDiff/Reporter/AmbiguousPointerMap <<< TestDiff/Reporter/AmbiguousStringer any( - cmp_test.Stringer("hello"), + &cmp_test.Stringer("hello"), ) >>> TestDiff/Reporter/AmbiguousStringer <<< TestDiff/Reporter/AmbiguousStringerStruct struct{ S fmt.Stringer }{ - S: cmp_test.Stringer("hello"), + S: &cmp_test.Stringer("hello"), } >>> TestDiff/Reporter/AmbiguousStringerStruct <<< TestDiff/Reporter/AmbiguousStringerSlice []fmt.Stringer{ - cmp_test.Stringer("hello"), + &cmp_test.Stringer("hello"), } >>> TestDiff/Reporter/AmbiguousStringerSlice <<< TestDiff/Reporter/AmbiguousStringerMap map[string]fmt.Stringer{ - "zero": cmp_test.Stringer("hello"), + "zero": &cmp_test.Stringer("hello"), } >>> TestDiff/Reporter/AmbiguousStringerMap <<< TestDiff/Reporter/AmbiguousSliceHeader []int( - ⟪ptr:0xdeadf00f, len:0, cap:5⟫{}, + ⟪ptr:0xdeadf00f, len:0, cap:1000⟫{}, ) >>> TestDiff/Reporter/AmbiguousSliceHeader <<< TestDiff/Reporter/AmbiguousStringerMapKey map[any]string{ - nil: "nil", + &⟪0xdeadf00f⟫"github.com/google/go-cmp/cmp_test".Stringer("hello"): "goodbye", - "github.com/google/go-cmp/cmp_test".Stringer("hello"): "goodbye", - "github.com/google/go-cmp/cmp/internal/teststructs/foo1".Bar{S: "fizz"}: "buzz", + "github.com/google/go-cmp/cmp/internal/teststructs/foo2".Bar{S: "fizz"}: "buzz", } >>> TestDiff/Reporter/AmbiguousStringerMapKey <<< TestDiff/Reporter/NonAmbiguousStringerMapKey map[any]string{ + s"fizz": "buzz", - s"hello": "goodbye", } >>> TestDiff/Reporter/NonAmbiguousStringerMapKey <<< TestDiff/Reporter/InvalidUTF8 any( - cmp_test.MyString("\xed\xa0\x80"), ) >>> TestDiff/Reporter/InvalidUTF8 <<< TestDiff/Reporter/UnbatchedSlice cmp_test.MyComposite{ ... // 3 identical fields BytesB: nil, BytesC: nil, IntsA: []int8{ + 10, 11, - 12, + 21, 13, 14, ... // 15 identical elements }, IntsB: nil, IntsC: nil, ... // 6 identical fields } >>> TestDiff/Reporter/UnbatchedSlice <<< TestDiff/Reporter/BatchedSlice cmp_test.MyComposite{ ... // 3 identical fields BytesB: nil, BytesC: nil, IntsA: []int8{ - 10, 11, 12, 13, 14, 15, 16, + 12, 29, 13, 27, 22, 23, 17, 18, 19, 20, 21, - 22, 23, 24, 25, 26, 27, 28, 29, + 10, 26, 16, 25, 28, 11, 15, 24, 14, }, IntsB: nil, IntsC: nil, ... 
// 6 identical fields } >>> TestDiff/Reporter/BatchedSlice <<< TestDiff/Reporter/BatchedWithComparer cmp_test.MyComposite{ StringA: "", StringB: "", BytesA: []uint8{ - 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, // -|.......| + 0x0c, 0x1d, 0x0d, 0x1b, 0x16, 0x17, // +|......| 0x11, 0x12, 0x13, 0x14, 0x15, // |.....| - 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, // -|........| + 0x0a, 0x1a, 0x10, 0x19, 0x1c, 0x0b, 0x0f, 0x18, 0x0e, // +|.........| }, BytesB: nil, BytesC: nil, ... // 9 identical fields } >>> TestDiff/Reporter/BatchedWithComparer <<< TestDiff/Reporter/BatchedLong any( - cmp_test.MyComposite{IntsA: []int8{0, 1, 2, 3, 4, 5, 6, 7, ...}}, ) >>> TestDiff/Reporter/BatchedLong <<< TestDiff/Reporter/BatchedNamedAndUnnamed cmp_test.MyComposite{ StringA: "", StringB: "", BytesA: []uint8{ - 0x01, 0x02, 0x03, // -|...| + 0x03, 0x02, 0x01, // +|...| }, BytesB: []cmp_test.MyByte{ - 0x04, 0x05, 0x06, + 0x06, 0x05, 0x04, }, BytesC: cmp_test.MyBytes{ - 0x07, 0x08, 0x09, // -|...| + 0x09, 0x08, 0x07, // +|...| }, IntsA: []int8{ - -1, -2, -3, + -3, -2, -1, }, IntsB: []cmp_test.MyInt{ - -4, -5, -6, + -6, -5, -4, }, IntsC: cmp_test.MyInts{ - -7, -8, -9, + -9, -8, -7, }, UintsA: []uint16{ - 1000, 2000, 3000, + 3000, 2000, 1000, }, UintsB: []cmp_test.MyUint{ - 4000, 5000, 6000, + 6000, 5000, 4000, }, UintsC: cmp_test.MyUints{ - 7000, 8000, 9000, + 9000, 8000, 7000, }, FloatsA: []float32{ - 1.5, 2.5, 3.5, + 3.5, 2.5, 1.5, }, FloatsB: []cmp_test.MyFloat{ - 4.5, 5.5, 6.5, + 6.5, 5.5, 4.5, }, FloatsC: cmp_test.MyFloats{ - 7.5, 8.5, 9.5, + 9.5, 8.5, 7.5, }, } >>> TestDiff/Reporter/BatchedNamedAndUnnamed <<< TestDiff/Reporter/BinaryHexdump cmp_test.MyComposite{ StringA: "", StringB: "", BytesA: []uint8{ 0xf3, 0x0f, 0x8a, 0xa4, 0xd3, 0x12, 0x52, 0x09, 0x24, 0xbe, // |......R.$.| - 0x58, 0x95, 0x41, 0xfd, 0x24, 0x66, 0x58, 0x8b, 0x79, // -|X.A.$fX.y| 0x54, 0xac, 0x0d, 0xd8, 0x71, 0x77, 0x70, 0x20, 0x6a, 0x5c, 0x73, 0x7f, 0x8c, 0x17, 0x55, 0xc0, // |T...qwp j\s...U.| 0x34, 0xce, 0x6e, 0xf7, 0xaa, 0x47, 0xee, 0x32, 0x9d, 0xc5, 0xca, 0x1e, 0x58, 0xaf, 0x8f, 0x27, // |4.n..G.2....X..'| 0xf3, 0x02, 0x4a, 0x90, 0xed, 0x69, 0x2e, 0x70, 0x32, 0xb4, 0xab, 0x30, 0x20, 0xb6, 0xbd, 0x5c, // |..J..i.p2..0 ..\| 0x62, 0x34, 0x17, 0xb0, 0x00, 0xbb, 0x4f, 0x7e, 0x27, 0x47, 0x06, 0xf4, 0x2e, 0x66, 0xfd, 0x63, // |b4....O~'G...f.c| 0xd7, 0x04, 0xdd, 0xb7, 0x30, 0xb7, 0xd1, // |....0..| - 0x55, 0x7e, 0x7b, 0xf6, 0xb3, 0x7e, 0x1d, 0x57, 0x69, // -|U~{..~.Wi| + 0x75, 0x2d, 0x5b, 0x5d, 0x5d, 0xf6, 0xb3, 0x68, 0x61, 0x68, 0x61, 0x7e, 0x1d, 0x57, 0x49, // +|u-[]]..haha~.WI| 0x20, 0x9e, 0xbc, 0xdf, 0xe1, 0x4d, 0xa9, 0xef, 0xa2, 0xd2, 0xed, 0xb4, 0x47, 0x78, 0xc9, 0xc9, // | ....M......Gx..| 0x27, 0xa4, 0xc6, 0xce, 0xec, 0x44, 0x70, 0x5d, // |'....Dp]| }, BytesB: nil, BytesC: nil, ... // 9 identical fields } >>> TestDiff/Reporter/BinaryHexdump <<< TestDiff/Reporter/StringHexdump cmp_test.MyComposite{ StringA: "", StringB: cmp_test.MyString{ - 0x72, 0x65, 0x61, 0x64, 0x6d, 0x65, // -|readme| + 0x67, 0x6f, 0x70, 0x68, 0x65, 0x72, // +|gopher| 0x2e, 0x74, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // |.txt............| 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // |................| ... 
// 64 identical bytes 0x30, 0x30, 0x36, 0x30, 0x30, 0x00, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x30, 0x30, // |00600.0000000.00| 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x34, // |00000.0000000004| - 0x36, // -|6| + 0x33, // +|3| 0x00, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x30, 0x31, 0x31, // |.00000000000.011| - 0x31, 0x37, 0x33, // -|173| + 0x32, 0x31, 0x37, // +|217| 0x00, 0x20, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // |. 0.............| 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // |................| ... // 326 identical bytes }, BytesA: nil, BytesB: nil, ... // 10 identical fields } >>> TestDiff/Reporter/StringHexdump <<< TestDiff/Reporter/BinaryString cmp_test.MyComposite{ StringA: "", StringB: "", BytesA: bytes.Join({ `{"firstName":"John","lastName":"Smith","isAlive":true,"age":27,"`, `address":{"streetAddress":"`, - "314 54th Avenue", + "21 2nd Street", `","city":"New York","state":"NY","postalCode":"10021-3100"},"pho`, `neNumbers":[{"type":"home","number":"212 555-1234"},{"type":"off`, ... // 101 identical bytes }, ""), BytesB: nil, BytesC: nil, ... // 9 identical fields } >>> TestDiff/Reporter/BinaryString <<< TestDiff/Reporter/TripleQuote cmp_test.MyComposite{ StringA: ( """ aaa bbb - ccc + CCC ddd eee ... // 10 identical lines ppp qqq - RRR - sss + rrr + SSS ttt uuu ... // 6 identical lines """ ), StringB: "", BytesA: nil, ... // 11 identical fields } >>> TestDiff/Reporter/TripleQuote <<< TestDiff/Reporter/TripleQuoteSlice []string{ ( """ ... // 23 identical lines xxx yyy - zzz """ ), "aaa\nbbb\nccc\nddd\neee\nfff\nggg\nhhh\niii\njjj\nkkk\nlll\nmmm\nnnn\nooo\nppp\n"..., } >>> TestDiff/Reporter/TripleQuoteSlice <<< TestDiff/Reporter/TripleQuoteNamedTypes cmp_test.MyComposite{ StringA: "", StringB: ( """ aaa bbb - ccc + CCC ddd eee ... // 10 identical lines ppp qqq - RRR - sss + rrr + SSS ttt uuu ... // 5 identical lines """ ), BytesA: nil, BytesB: nil, BytesC: cmp_test.MyBytes( """ aaa bbb - ccc + CCC ddd eee ... // 10 identical lines ppp qqq - RRR - sss + rrr + SSS ttt uuu ... // 5 identical lines """ ), IntsA: nil, IntsB: nil, ... // 7 identical fields } >>> TestDiff/Reporter/TripleQuoteNamedTypes <<< TestDiff/Reporter/TripleQuoteSliceNamedTypes []cmp_test.MyString{ ( """ ... // 23 identical lines xxx yyy - zzz """ ), "aaa\nbbb\nccc\nddd\neee\nfff\nggg\nhhh\niii\njjj\nkkk\nlll\nmmm\nnnn\nooo\nppp\n"..., } >>> TestDiff/Reporter/TripleQuoteSliceNamedTypes <<< TestDiff/Reporter/TripleQuoteEndlines ( """ aaa bbb - ccc + CCC ddd eee ... // 10 identical lines ppp qqq - RRR + rrr sss ttt ... // 4 identical lines yyy zzz - """ ) >>> TestDiff/Reporter/TripleQuoteEndlines <<< TestDiff/Reporter/AvoidTripleQuoteAmbiguousQuotes strings.Join({ "aaa", "bbb", - "ccc", + "CCC", "ddd", "eee", - "fff", + `"""`, "ggg", "hhh", ... // 7 identical lines "ppp", "qqq", - "RRR", + "rrr", "sss", "ttt", ... // 7 identical lines }, "\n") >>> TestDiff/Reporter/AvoidTripleQuoteAmbiguousQuotes <<< TestDiff/Reporter/AvoidTripleQuoteAmbiguousEllipsis strings.Join({ "aaa", "bbb", - "ccc", - "...", + "CCC", + "ddd", "eee", "fff", ... // 9 identical lines "ppp", "qqq", - "RRR", + "rrr", "sss", "ttt", ... // 7 identical lines }, "\n") >>> TestDiff/Reporter/AvoidTripleQuoteAmbiguousEllipsis <<< TestDiff/Reporter/AvoidTripleQuoteNonPrintable strings.Join({ "aaa", "bbb", - "ccc", + "CCC", "ddd", "eee", ... 
// 7 identical lines "mmm", "nnn", - "ooo", + "o\roo", "ppp", "qqq", - "RRR", + "rrr", "sss", "ttt", ... // 7 identical lines }, "\n") >>> TestDiff/Reporter/AvoidTripleQuoteNonPrintable <<< TestDiff/Reporter/AvoidTripleQuoteIdenticalWhitespace strings.Join({ "aaa", "bbb", - "ccc", - " ddd", + "ccc ", + "ddd", "eee", "fff", ... // 9 identical lines "ppp", "qqq", - "RRR", + "rrr", "sss", "ttt", ... // 7 identical lines }, "\n") >>> TestDiff/Reporter/AvoidTripleQuoteIdenticalWhitespace <<< TestDiff/Reporter/TripleQuoteStringer []fmt.Stringer{ s"package main\n\nimport (\n\t\"fmt\"\n)\n\nfunc main() {\n\tfmt.Println(\"Hel"..., - ( - s""" - package main - - import ( - "fmt" - "math/rand" - ) - - func main() { - fmt.Println("My favorite number is", rand.Intn(10)) - } - s""" - ), + ( + s""" + package main + + import ( + "fmt" + "math" + ) + + func main() { + fmt.Printf("Now you have %g problems.\n", math.Sqrt(7)) + } + s""" + ), } >>> TestDiff/Reporter/TripleQuoteStringer <<< TestDiff/Reporter/LimitMaximumBytesDiffs []uint8{ - 0xcd, 0x3d, 0x3d, 0x3d, 0x3d, 0x06, 0x1f, 0xc2, 0xcc, 0xc2, 0x2d, 0x53, // -|.====.....-S| + 0x5c, 0x3d, 0x3d, 0x3d, 0x3d, 0x7c, 0x96, 0xe7, 0x53, 0x42, 0xa0, 0xab, // +|\====|..SB..| 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, // |=====| - 0x1d, 0xdf, 0x61, 0xae, 0x98, 0x9f, 0x48, // -|..a...H| + 0xf0, 0xbd, 0xa5, 0x71, 0xab, 0x17, 0x3b, // +|...q..;| 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, // |======| - 0xc7, 0xb0, 0xb7, // -|...| + 0xab, 0x50, 0x00, // +|.P.| 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, // |=======| - 0xef, 0x3d, 0x3d, 0x3d, 0x3d, 0x3a, 0x5c, 0x94, 0xe6, 0x4a, 0xc7, // -|.====:\..J.| + 0xeb, 0x3d, 0x3d, 0x3d, 0x3d, 0xa5, 0x14, 0xe6, 0x4f, 0x28, 0xe4, // +|.====...O(.| 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, // |=====| - 0xb4, // -|.| + 0x28, // +|(| 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, // |======| - 0x0a, 0x0a, 0xf7, 0x94, // -|....| + 0x2f, 0x63, 0x40, 0x3f, // +|/c@?| 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, // |===========| - 0xf2, 0x9c, 0xc0, 0x66, // -|...f| + 0xd9, 0x78, 0xed, 0x13, // +|.x..| 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, // |=====| - 0x34, 0xf6, 0xf1, 0xc3, 0x17, 0x82, // -|4.....| + 0x4a, 0xfc, 0x91, 0x38, 0x42, 0x8d, // +|J..8B.| 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, // |======| - 0x6e, 0x16, 0x60, 0x91, 0x44, 0xc6, 0x06, // -|n.`.D..| + 0x61, 0x38, 0x41, 0xeb, 0x73, 0x04, 0xae, // +|a8A.s..| 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, // |=======| - 0x1c, 0x45, 0x3d, 0x3d, 0x3d, 0x3d, 0x2e, // -|.E====.| + 0x07, 0x43, 0x3d, 0x3d, 0x3d, 0x3d, 0x1c, // +|.C====.| 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, // |===========| - 0xc4, 0x18, // -|..| + 0x91, 0x22, // +|."| 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, // |=======| - 0x8a, 0x8d, 0x0e, 0x3d, 0x3d, 0x3d, 0x3d, 0x87, 0xb1, 0xa5, 0x8e, 0xc3, 0x3d, 0x3d, 0x3d, 0x3d, // -|...====.....====| - 0x3d, 0x7a, 0x0f, 0x31, 0xae, 0x55, 0x3d, // -|=z.1.U=| + 0x75, 0xd8, 0xbe, 0x3d, 0x3d, 0x3d, 0x3d, 0x73, 0xec, 0x84, 0x35, 0x07, 0x3d, 0x3d, 0x3d, 0x3d, // +|u..====s..5.====| + 0x3d, 0x3b, 0xab, 0x53, 0x39, 0x74, // +|=;.S9t| 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, // |=====| - 0x47, 0x2c, 0x3d, // -|G,=| + 0x3d, 0x1f, 0x1b, // +|=..| 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, // |======| - 0x35, 0xe7, 0x35, 0xee, 0x82, 0xf4, 0xce, 0x3d, 0x3d, 0x3d, 0x3d, 0x11, 0x72, 0x3d, // -|5.5....====.r=| + 0x3d, 0x80, 0xab, 0x2f, 0xed, 0x2b, 0x3a, 0x3b, 0x3d, 0x3d, 0x3d, 0x3d, 0xea, 0x49, // +|=../.+:;====.I| 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, 0x3d, // |==========| - 0xaf, 0x5d, 0x3d, // -|.]=| + 0x3d, 
0xab, 0x6c, // +|=.l| ... // 51 identical, 34 removed, and 35 inserted bytes } >>> TestDiff/Reporter/LimitMaximumBytesDiffs <<< TestDiff/Reporter/LimitMaximumStringDiffs ( """ - a + aa b - c + cc d - e + ee f - g + gg h - i + ii j - k + kk l - m + mm n - o + oo p - q + qq r - s + ss t - u + uu v - w + ww x - y + yy z - A + AA B - C + CC D - E + EE ... // 12 identical, 10 removed, and 10 inserted lines """ ) >>> TestDiff/Reporter/LimitMaximumStringDiffs <<< TestDiff/Reporter/LimitMaximumSliceDiffs []struct{ S string }{ - {S: "a"}, + {S: "aa"}, {S: "b"}, - {S: "c"}, + {S: "cc"}, {S: "d"}, - {S: "e"}, + {S: "ee"}, {S: "f"}, - {S: "g"}, + {S: "gg"}, {S: "h"}, - {S: "i"}, + {S: "ii"}, {S: "j"}, - {S: "k"}, + {S: "kk"}, {S: "l"}, - {S: "m"}, + {S: "mm"}, {S: "n"}, - {S: "o"}, + {S: "oo"}, {S: "p"}, - {S: "q"}, + {S: "qq"}, {S: "r"}, - {S: "s"}, + {S: "ss"}, {S: "t"}, - {S: "u"}, + {S: "uu"}, {S: "v"}, - {S: "w"}, + {S: "ww"}, {S: "x"}, - {S: "y"}, + {S: "yy"}, {S: "z"}, - {S: "A"}, + {S: "AA"}, {S: "B"}, - {S: "C"}, + {S: "CC"}, {S: "D"}, - {S: "E"}, + {S: "EE"}, ... // 12 identical and 10 modified elements } >>> TestDiff/Reporter/LimitMaximumSliceDiffs <<< TestDiff/Reporter/MultilineString cmp_test.MyComposite{ StringA: ( """ - Package cmp determines equality of values. + Package cmp determines equality of value. This package is intended to be a more powerful and safer alternative to ... // 6 identical lines For example, an equality function may report floats as equal so long as they are within some tolerance of each other. - - • Types that have an Equal method may use that method to determine equality. - This allows package authors to determine the equality operation for the types - that they define. • If no custom equality functions are used and no Equal method is defined, ... // 3 identical lines by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly compared using the AllowUnexported option. - """ ), StringB: "", BytesA: nil, ... 
// 11 identical fields } >>> TestDiff/Reporter/MultilineString <<< TestDiff/Reporter/Slices cmp_test.MyComposite{ StringA: "", StringB: "", - BytesA: []uint8{0x01, 0x02, 0x03}, + BytesA: nil, - BytesB: []cmp_test.MyByte{0x04, 0x05, 0x06}, + BytesB: nil, - BytesC: cmp_test.MyBytes{0x07, 0x08, 0x09}, + BytesC: nil, - IntsA: []int8{-1, -2, -3}, + IntsA: nil, - IntsB: []cmp_test.MyInt{-4, -5, -6}, + IntsB: nil, - IntsC: cmp_test.MyInts{-7, -8, -9}, + IntsC: nil, - UintsA: []uint16{1000, 2000, 3000}, + UintsA: nil, - UintsB: []cmp_test.MyUint{4000, 5000, 6000}, + UintsB: nil, - UintsC: cmp_test.MyUints{7000, 8000, 9000}, + UintsC: nil, - FloatsA: []float32{1.5, 2.5, 3.5}, + FloatsA: nil, - FloatsB: []cmp_test.MyFloat{4.5, 5.5, 6.5}, + FloatsB: nil, - FloatsC: cmp_test.MyFloats{7.5, 8.5, 9.5}, + FloatsC: nil, } >>> TestDiff/Reporter/Slices <<< TestDiff/Reporter/EmptySlices cmp_test.MyComposite{ StringA: "", StringB: "", - BytesA: []uint8{}, + BytesA: nil, - BytesB: []cmp_test.MyByte{}, + BytesB: nil, - BytesC: cmp_test.MyBytes{}, + BytesC: nil, - IntsA: []int8{}, + IntsA: nil, - IntsB: []cmp_test.MyInt{}, + IntsB: nil, - IntsC: cmp_test.MyInts{}, + IntsC: nil, - UintsA: []uint16{}, + UintsA: nil, - UintsB: []cmp_test.MyUint{}, + UintsB: nil, - UintsC: cmp_test.MyUints{}, + UintsC: nil, - FloatsA: []float32{}, + FloatsA: nil, - FloatsB: []cmp_test.MyFloat{}, + FloatsB: nil, - FloatsC: cmp_test.MyFloats{}, + FloatsC: nil, } >>> TestDiff/Reporter/EmptySlices <<< TestDiff/Reporter/LargeMapKey map[*[]uint8]int{ - &⟪0xdeadf00f⟫⟪ptr:0xdeadf00f, len:1048576, cap:1048576⟫{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ...}: 0, + &⟪0xdeadf00f⟫⟪ptr:0xdeadf00f, len:1048576, cap:1048576⟫{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ...}: 0, } >>> TestDiff/Reporter/LargeMapKey <<< TestDiff/Reporter/LargeStringInInterface struct{ X any }{ X: strings.Join({ ... // 485 identical bytes "s mus. Pellentesque mi lorem, consectetur id porttitor id, solli", "citudin sit amet enim. Duis eu dolor magna. 
Nunc ut augue turpis", - ".", + ",", }, ""), } >>> TestDiff/Reporter/LargeStringInInterface <<< TestDiff/Reporter/LargeBytesInInterface struct{ X any }{ X: bytes.Join({ ... // 485 identical bytes "s mus. Pellentesque mi lorem, consectetur id porttitor id, solli", "citudin sit amet enim. Duis eu dolor magna. Nunc ut augue turpis", - ".", + ",", }, ""), } >>> TestDiff/Reporter/LargeBytesInInterface <<< TestDiff/Reporter/LargeStandaloneString struct{ X any }{ - X: [1]string{ - "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nam sit amet pretium ligula, at gravida quam. Integer iaculis, velit at sagittis ultricies, lacus metus scelerisque turpis, ornare feugiat nulla nisl ac erat. Maecenas elementum ultricies libero, sed efficitur lacus molestie non. Nulla ac pretium dolor. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Pellentesque mi lorem, consectetur id porttitor id, sollicitudin sit amet enim. Duis eu dolor magna. Nunc ut augue turpis.", - }, + X: [1]string{ + "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nam sit amet pretium ligula, at gravida quam. Integer iaculis, velit at sagittis ultricies, lacus metus scelerisque turpis, ornare feugiat nulla nisl ac erat. Maecenas elementum ultricies libero, sed efficitur lacus molestie non. Nulla ac pretium dolor. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Pellentesque mi lorem, consectetur id porttitor id, sollicitudin sit amet enim. Duis eu dolor magna. Nunc ut augue turpis,", + }, } >>> TestDiff/Reporter/LargeStandaloneString <<< TestDiff/Reporter/SurroundingEqualElements strings.Join({ "org-4747474747474747,bucket-4242424242424242:m,tag1=a,tag2=aa", - ",#=_value", ` _value=2 11 org-4747474747474747,bucket-4242424242424242:m,tag1`, "=a,tag2=bb", - ",#=_value", ` _value=2 21 org-4747474747474747,bucket-4242424242424242:m,tag1`, "=b,tag2=cc", - ",#=_value", ` _value=1 21 org-4747474747474747,bucket-4242424242424242:m,tag1`, "=a,tag2=dd", - ",#=_value", ` _value=3 31 org-4747474747474747,bucket-4242424242424242:m,tag1`, "=c", - ",#=_value", ` _value=4 41 `, }, "") >>> TestDiff/Reporter/SurroundingEqualElements <<< TestDiff/Reporter/MostlyTextString strings.Join({ "org-4747474747474747,bucket-4242424242424242:m,tag1=a,tag2=aa", - ",\xff=_value", " _value=2 11\norg-4747474747474747,bucket-4242424242424242:m,tag1", "=a,tag2=bb", - ",\xff=_value", " _value=2 21\norg-4747474747474747,bucket-4242424242424242:m,tag1", "=b,tag2=cc", - ",\xff=_value", " _value=1 21\norg-4747474747474747,bucket-4242424242424242:m,tag1", "=a,tag2=dd", - ",\xff=_value", " _value=3 31\norg-4747474747474747,bucket-4242424242424242:m,tag1", "=c", - ",\xff=_value", " _value=4 41\n", }, "") >>> TestDiff/Reporter/MostlyTextString <<< TestDiff/Reporter/AllLinesDiffer strings.Join({ + "X", "d5c14bdf6bac81c27afc5429500ed750\n", + "X", "25483503b557c606dad4f144d27ae10b\n", + "X", "90bdbcdbb6ea7156068e3dcfb7459244\n", + "X", "978f480a6e3cced51e297fbff9a506b7\n", }, "") >>> TestDiff/Reporter/AllLinesDiffer <<< TestDiff/Reporter/StringifiedBytes struct{ X []uint8 }{ - X: []uint8("hello, world!"), + X: nil, } >>> TestDiff/Reporter/StringifiedBytes <<< TestDiff/Reporter/NonStringifiedBytes struct{ X []uint8 }{ - X: []uint8{0xde, 0xad, 0xbe, 0xef}, + X: nil, } >>> TestDiff/Reporter/NonStringifiedBytes 
<<< TestDiff/Reporter/StringifiedNamedBytes struct{ X cmp_test.MyBytes }{ - X: cmp_test.MyBytes("hello, world!"), + X: nil, } >>> TestDiff/Reporter/StringifiedNamedBytes <<< TestDiff/Reporter/NonStringifiedNamedBytes struct{ X cmp_test.MyBytes }{ - X: cmp_test.MyBytes{0xde, 0xad, 0xbe, 0xef}, + X: nil, } >>> TestDiff/Reporter/NonStringifiedNamedBytes <<< TestDiff/Reporter/ShortJSON ( """ { - "id": 1, + "id": 1434180, "foo": true, "bar": true, } """ ) >>> TestDiff/Reporter/ShortJSON <<< TestDiff/Reporter/PointerToStringOrAny any( - &string("hello"), + &any(string("hello")), ) >>> TestDiff/Reporter/PointerToStringOrAny <<< TestDiff/Reporter/NamedPointer any( - &string("hello"), + cmp_test.PointerString(&string("hello")), ) >>> TestDiff/Reporter/NamedPointer <<< TestDiff/Reporter/MapStringAny map[string]any{ - "key": int(0), + "key": uint(0), } >>> TestDiff/Reporter/MapStringAny <<< TestDiff/Reporter/StructFieldAny struct{ X any }{ - X: int(0), + X: uint(0), } >>> TestDiff/Reporter/StructFieldAny <<< TestDiff/Reporter/SliceOfBytesText [][]uint8{ - "hello", "foo", + "foo", "barbaz", + "added", + "here", - "blahdieblah", + "hrmph", } >>> TestDiff/Reporter/SliceOfBytesText <<< TestDiff/Reporter/SliceOfBytesBinary [][]uint8{ - {0xde, 0xad, 0xbe, 0xef}, {0xff, 0x6f, 0x6f}, + "foo", "barbaz", + "added", + "here", - "blahdieblah", + {0x68, 0x72, 0x6d, 0x70, 0x68, 0xff}, } >>> TestDiff/Reporter/SliceOfBytesBinary <<< TestDiff/Reporter/ManyEscapeCharacters ( """ [ - {"Base32": "NA======"}, + {"Base32": "NB======"}, {"Base32": "NBSQ===="}, {"Base32": "NBSWY==="}, ... // 3 identical lines """ ) >>> TestDiff/Reporter/ManyEscapeCharacters <<< TestDiff/EmbeddedStruct/ParentStructA/Inequal teststructs.ParentStructA{ privateStruct: teststructs.privateStruct{ - Public: 1, + Public: 2, - private: 2, + private: 3, }, } >>> TestDiff/EmbeddedStruct/ParentStructA/Inequal <<< TestDiff/EmbeddedStruct/ParentStructB/Inequal teststructs.ParentStructB{ PublicStruct: teststructs.PublicStruct{ - Public: 1, + Public: 2, - private: 2, + private: 3, }, } >>> TestDiff/EmbeddedStruct/ParentStructB/Inequal <<< TestDiff/EmbeddedStruct/ParentStructC/Inequal teststructs.ParentStructC{ privateStruct: teststructs.privateStruct{ - Public: 1, + Public: 2, - private: 2, + private: 3, }, - Public: 3, + Public: 4, - private: 4, + private: 5, } >>> TestDiff/EmbeddedStruct/ParentStructC/Inequal <<< TestDiff/EmbeddedStruct/ParentStructD/Inequal teststructs.ParentStructD{ PublicStruct: teststructs.PublicStruct{ - Public: 1, + Public: 2, - private: 2, + private: 3, }, - Public: 3, + Public: 4, - private: 4, + private: 5, } >>> TestDiff/EmbeddedStruct/ParentStructD/Inequal <<< TestDiff/EmbeddedStruct/ParentStructE/Inequal teststructs.ParentStructE{ privateStruct: teststructs.privateStruct{ - Public: 1, + Public: 2, - private: 2, + private: 3, }, PublicStruct: teststructs.PublicStruct{ - Public: 3, + Public: 4, - private: 4, + private: 5, }, } >>> TestDiff/EmbeddedStruct/ParentStructE/Inequal <<< TestDiff/EmbeddedStruct/ParentStructF/Inequal teststructs.ParentStructF{ privateStruct: teststructs.privateStruct{ - Public: 1, + Public: 2, - private: 2, + private: 3, }, PublicStruct: teststructs.PublicStruct{ - Public: 3, + Public: 4, - private: 4, + private: 5, }, - Public: 5, + Public: 6, - private: 6, + private: 7, } >>> TestDiff/EmbeddedStruct/ParentStructF/Inequal <<< TestDiff/EmbeddedStruct/ParentStructG/Inequal &teststructs.ParentStructG{ privateStruct: &teststructs.privateStruct{ - Public: 1, + Public: 2, - private: 2, + private: 3, }, } >>> 
TestDiff/EmbeddedStruct/ParentStructG/Inequal <<< TestDiff/EmbeddedStruct/ParentStructH/Inequal &teststructs.ParentStructH{ PublicStruct: &teststructs.PublicStruct{ - Public: 1, + Public: 2, - private: 2, + private: 3, }, } >>> TestDiff/EmbeddedStruct/ParentStructH/Inequal <<< TestDiff/EmbeddedStruct/ParentStructI/Inequal &teststructs.ParentStructI{ privateStruct: &teststructs.privateStruct{ - Public: 1, + Public: 2, - private: 2, + private: 3, }, PublicStruct: &teststructs.PublicStruct{ - Public: 3, + Public: 4, - private: 4, + private: 5, }, } >>> TestDiff/EmbeddedStruct/ParentStructI/Inequal <<< TestDiff/EmbeddedStruct/ParentStructJ/Inequal &teststructs.ParentStructJ{ privateStruct: &teststructs.privateStruct{ - Public: 1, + Public: 2, - private: 2, + private: 3, }, PublicStruct: &teststructs.PublicStruct{ - Public: 3, + Public: 4, - private: 4, + private: 5, }, Public: teststructs.PublicStruct{ - Public: 7, + Public: 8, - private: 8, + private: 9, }, private: teststructs.privateStruct{ - Public: 5, + Public: 6, - private: 6, + private: 7, }, } >>> TestDiff/EmbeddedStruct/ParentStructJ/Inequal <<< TestDiff/EqualMethod/StructB/ValueInequal teststructs.StructB{ - X: "NotEqual", + X: "not_equal", } >>> TestDiff/EqualMethod/StructB/ValueInequal <<< TestDiff/EqualMethod/StructD/ValueInequal teststructs.StructD{ - X: "NotEqual", + X: "not_equal", } >>> TestDiff/EqualMethod/StructD/ValueInequal <<< TestDiff/EqualMethod/StructE/ValueInequal teststructs.StructE{ - X: "NotEqual", + X: "not_equal", } >>> TestDiff/EqualMethod/StructE/ValueInequal <<< TestDiff/EqualMethod/StructF/ValueInequal teststructs.StructF{ - X: "NotEqual", + X: "not_equal", } >>> TestDiff/EqualMethod/StructF/ValueInequal <<< TestDiff/EqualMethod/StructA1/ValueInequal teststructs.StructA1{ StructA: {X: "NotEqual"}, - X: "NotEqual", + X: "not_equal", } >>> TestDiff/EqualMethod/StructA1/ValueInequal <<< TestDiff/EqualMethod/StructA1/PointerInequal &teststructs.StructA1{ StructA: {X: "NotEqual"}, - X: "NotEqual", + X: "not_equal", } >>> TestDiff/EqualMethod/StructA1/PointerInequal <<< TestDiff/EqualMethod/StructB1/ValueInequal teststructs.StructB1{ StructB: Inverse(Addr, &teststructs.StructB{X: "NotEqual"}), - X: "NotEqual", + X: "not_equal", } >>> TestDiff/EqualMethod/StructB1/ValueInequal <<< TestDiff/EqualMethod/StructB1/PointerInequal &teststructs.StructB1{ StructB: Inverse(Addr, &teststructs.StructB{X: "NotEqual"}), - X: "NotEqual", + X: "not_equal", } >>> TestDiff/EqualMethod/StructB1/PointerInequal <<< TestDiff/EqualMethod/StructD1/ValueInequal teststructs.StructD1{ - StructD: teststructs.StructD{X: "NotEqual"}, + StructD: teststructs.StructD{X: "not_equal"}, - X: "NotEqual", + X: "not_equal", } >>> TestDiff/EqualMethod/StructD1/ValueInequal <<< TestDiff/EqualMethod/StructE1/ValueInequal teststructs.StructE1{ - StructE: teststructs.StructE{X: "NotEqual"}, + StructE: teststructs.StructE{X: "not_equal"}, - X: "NotEqual", + X: "not_equal", } >>> TestDiff/EqualMethod/StructE1/ValueInequal <<< TestDiff/EqualMethod/StructF1/ValueInequal teststructs.StructF1{ - StructF: teststructs.StructF{X: "NotEqual"}, + StructF: teststructs.StructF{X: "not_equal"}, - X: "NotEqual", + X: "not_equal", } >>> TestDiff/EqualMethod/StructF1/ValueInequal <<< TestDiff/EqualMethod/StructA2/ValueInequal teststructs.StructA2{ StructA: &{X: "NotEqual"}, - X: "NotEqual", + X: "not_equal", } >>> TestDiff/EqualMethod/StructA2/ValueInequal <<< TestDiff/EqualMethod/StructA2/PointerInequal &teststructs.StructA2{ StructA: &{X: "NotEqual"}, - X: "NotEqual", + X: 
"not_equal", } >>> TestDiff/EqualMethod/StructA2/PointerInequal <<< TestDiff/EqualMethod/StructB2/ValueInequal teststructs.StructB2{ StructB: &{X: "NotEqual"}, - X: "NotEqual", + X: "not_equal", } >>> TestDiff/EqualMethod/StructB2/ValueInequal <<< TestDiff/EqualMethod/StructB2/PointerInequal &teststructs.StructB2{ StructB: &{X: "NotEqual"}, - X: "NotEqual", + X: "not_equal", } >>> TestDiff/EqualMethod/StructB2/PointerInequal <<< TestDiff/EqualMethod/StructNo/Inequal teststructs.StructNo{ - X: "NotEqual", + X: "not_equal", } >>> TestDiff/EqualMethod/StructNo/Inequal <<< TestDiff/Cycle/PointersInequal &&⟪ref#0⟫cmp_test.P( - &⟪ref#0⟫(...), + &&⟪ref#0⟫(...), ) >>> TestDiff/Cycle/PointersInequal <<< TestDiff/Cycle/SlicesInequal cmp_test.S{ - ⟪ref#0⟫{⟪ref#0⟫(...)}, + ⟪ref#1⟫{{⟪ref#1⟫(...)}}, } >>> TestDiff/Cycle/SlicesInequal <<< TestDiff/Cycle/MapsInequal cmp_test.M⟪ref#0⟫{ - 0: ⟪ref#0⟫(...), + 0: {0: ⟪ref#0⟫(...)}, } >>> TestDiff/Cycle/MapsInequal <<< TestDiff/Cycle/GraphInequalZeroed map[string]*cmp_test.CycleAlpha{ "Bar": &⟪ref#0⟫{ Name: "Bar", Bravos: map[string]*cmp_test.CycleBravo{ "BarBuzzBravo": &⟪ref#1⟫{ - ID: 102, + ID: 0, Name: "BarBuzzBravo", Mods: 2, Alphas: map[string]*cmp_test.CycleAlpha{ "Bar": &⟪ref#0⟫(...), "Buzz": &⟪ref#2⟫{ Name: "Buzz", Bravos: map[string]*cmp_test.CycleBravo{ "BarBuzzBravo": &⟪ref#1⟫(...), "BuzzBarBravo": &⟪ref#3⟫{ - ID: 103, + ID: 0, Name: "BuzzBarBravo", Mods: 0, Alphas: {"Bar": &⟪ref#0⟫(...), "Buzz": &⟪ref#2⟫(...)}, }, }, }, }, }, "BuzzBarBravo": &⟪ref#3⟫{ - ID: 103, + ID: 0, Name: "BuzzBarBravo", Mods: 0, Alphas: map[string]*cmp_test.CycleAlpha{ "Bar": &⟪ref#0⟫(...), "Buzz": &⟪ref#2⟫{ Name: "Buzz", Bravos: map[string]*cmp_test.CycleBravo{ "BarBuzzBravo": &⟪ref#1⟫{ - ID: 102, + ID: 0, Name: "BarBuzzBravo", Mods: 2, Alphas: {"Bar": &⟪ref#0⟫(...), "Buzz": &⟪ref#2⟫(...)}, }, "BuzzBarBravo": &⟪ref#3⟫(...), }, }, }, }, }, }, "Buzz": &⟪ref#2⟫{ Name: "Buzz", Bravos: map[string]*cmp_test.CycleBravo{ "BarBuzzBravo": &⟪ref#1⟫{ - ID: 102, + ID: 0, Name: "BarBuzzBravo", Mods: 2, Alphas: map[string]*cmp_test.CycleAlpha{ "Bar": &⟪ref#0⟫{ Name: "Bar", Bravos: map[string]*cmp_test.CycleBravo{ "BarBuzzBravo": &⟪ref#1⟫(...), "BuzzBarBravo": &⟪ref#3⟫{ - ID: 103, + ID: 0, Name: "BuzzBarBravo", Mods: 0, Alphas: {"Bar": &⟪ref#0⟫(...), "Buzz": &⟪ref#2⟫(...)}, }, }, }, "Buzz": &⟪ref#2⟫(...), }, }, "BuzzBarBravo": &⟪ref#3⟫{ - ID: 103, + ID: 0, Name: "BuzzBarBravo", Mods: 0, Alphas: map[string]*cmp_test.CycleAlpha{ "Bar": &⟪ref#0⟫{ Name: "Bar", Bravos: map[string]*cmp_test.CycleBravo{ "BarBuzzBravo": &⟪ref#1⟫{ - ID: 102, + ID: 0, Name: "BarBuzzBravo", Mods: 2, Alphas: {"Bar": &⟪ref#0⟫(...), "Buzz": &⟪ref#2⟫(...)}, }, "BuzzBarBravo": &⟪ref#3⟫(...), }, }, "Buzz": &⟪ref#2⟫(...), }, }, }, }, "Foo": &⟪ref#4⟫{ Name: "Foo", Bravos: map[string]*cmp_test.CycleBravo{ "FooBravo": &{ - ID: 101, + ID: 0, Name: "FooBravo", Mods: 100, Alphas: {"Foo": &⟪ref#4⟫(...)}, }, }, }, } >>> TestDiff/Cycle/GraphInequalZeroed <<< TestDiff/Cycle/GraphInequalStruct map[string]*cmp_test.CycleAlpha{ "Bar": &⟪ref#0⟫{ Name: "Bar", Bravos: map[string]*cmp_test.CycleBravo{ "BarBuzzBravo": &⟪ref#1⟫{ ID: 102, Name: "BarBuzzBravo", Mods: 2, Alphas: map[string]*cmp_test.CycleAlpha{ "Bar": &⟪ref#0⟫(...), "Buzz": &⟪ref#2⟫{ Name: "Buzz", Bravos: map[string]*cmp_test.CycleBravo{ "BarBuzzBravo": &⟪ref#1⟫(...), "BuzzBarBravo": &⟪ref#3⟫{ ID: 103, Name: "BuzzBarBravo", Mods: 0, - Alphas: nil, + Alphas: map[string]*cmp_test.CycleAlpha{"Bar": &⟪ref#0⟫(...), "Buzz": &⟪ref#2⟫(...)}, }, }, }, }, }, "BuzzBarBravo": &⟪ref#3⟫{ ID: 
103, Name: "BuzzBarBravo", Mods: 0, Alphas: map[string]*cmp_test.CycleAlpha{ "Bar": &⟪ref#0⟫(...), "Buzz": &⟪ref#2⟫{ Name: "Buzz", Bravos: map[string]*cmp_test.CycleBravo{ "BarBuzzBravo": &⟪ref#1⟫{ID: 102, Name: "BarBuzzBravo", Mods: 2, Alphas: {"Bar": &⟪ref#0⟫(...), "Buzz": &⟪ref#2⟫(...)}}, - "BuzzBarBravo": &{ID: 103, Name: "BuzzBarBravo"}, + "BuzzBarBravo": &⟪ref#3⟫(...), }, }, }, }, }, }, "Buzz": &⟪ref#2⟫{ Name: "Buzz", Bravos: map[string]*cmp_test.CycleBravo{ "BarBuzzBravo": &⟪ref#1⟫{ID: 102, Name: "BarBuzzBravo", Mods: 2, Alphas: {"Bar": &⟪ref#0⟫{Name: "Bar", Bravos: {"BarBuzzBravo": &⟪ref#1⟫(...), "BuzzBarBravo": &⟪ref#3⟫{ID: 103, Name: "BuzzBarBravo", Alphas: {"Bar": &⟪ref#0⟫(...), "Buzz": &⟪ref#2⟫(...)}}}}, "Buzz": &⟪ref#2⟫(...)}}, "BuzzBarBravo": &⟪ref#3⟫{ ID: 103, Name: "BuzzBarBravo", Mods: 0, - Alphas: nil, + Alphas: map[string]*cmp_test.CycleAlpha{ + "Bar": &⟪ref#0⟫{ + Name: "Bar", + Bravos: map[string]*cmp_test.CycleBravo{"BarBuzzBravo": &⟪ref#1⟫{...}, "BuzzBarBravo": &⟪ref#3⟫(...)}, + }, + "Buzz": &⟪ref#2⟫(...), + }, }, }, }, "Foo": &⟪ref#4⟫{Name: "Foo", Bravos: {"FooBravo": &{ID: 101, Name: "FooBravo", Mods: 100, Alphas: {"Foo": &⟪ref#4⟫(...)}}}}, } >>> TestDiff/Cycle/GraphInequalStruct <<< TestDiff/Project1/ProtoInequal teststructs.Eagle{ ... // 4 identical fields Dreamers: nil, Prong: 0, Slaps: []teststructs.Slap{ ... // 2 identical elements {}, {}, { Name: "", Desc: "", DescLong: "", - Args: s"metadata", + Args: s"metadata2", Tense: 0, Interval: 0, ... // 3 identical fields }, }, StateGoverner: "", PrankRating: "", ... // 2 identical fields } >>> TestDiff/Project1/ProtoInequal <<< TestDiff/Project1/Inequal teststructs.Eagle{ ... // 2 identical fields Desc: "some description", DescLong: "", Dreamers: []teststructs.Dreamer{ {}, { ... // 4 identical fields ContSlaps: nil, ContSlapsInterval: 0, Animal: []any{ teststructs.Goat{ Target: "corporation", Slaps: nil, FunnyPrank: "", Immutable: &teststructs.GoatImmutable{ - ID: "southbay2", + ID: "southbay", - State: &6, + State: &5, Started: s"2009-11-10 23:00:00 +0000 UTC", Stopped: s"0001-01-01 00:00:00 +0000 UTC", ... // 1 ignored and 1 identical fields }, }, teststructs.Donkey{}, }, Ornamental: false, Amoeba: 53, ... // 5 identical fields }, }, Prong: 0, Slaps: []teststructs.Slap{ { ... // 6 identical fields Homeland: 0, FunnyPrank: "", Immutable: &teststructs.SlapImmutable{ ID: "immutableSlap", Out: nil, - MildSlap: false, + MildSlap: true, PrettyPrint: "", State: nil, Started: s"2009-11-10 23:00:00 +0000 UTC", Stopped: s"0001-01-01 00:00:00 +0000 UTC", LastUpdate: s"0001-01-01 00:00:00 +0000 UTC", LoveRadius: &teststructs.LoveRadius{ Summer: &teststructs.SummerLove{ Summary: &teststructs.SummerLoveSummary{ Devices: []string{ "foo", - "bar", - "baz", }, ChangeType: {1, 2, 3}, ... // 1 ignored field }, ... // 1 ignored field }, ... // 1 ignored field }, ... // 1 ignored field }, }, }, StateGoverner: "", PrankRating: "", ... // 2 identical fields } >>> TestDiff/Project1/Inequal <<< TestDiff/Project2/InequalOrder teststructs.GermBatch{ DirtyGerms: map[int32][]*testprotos.Germ{ 17: {s"germ1"}, 18: { - s"germ2", s"germ3", s"germ4", + s"germ2", }, }, CleanGerms: nil, GermMap: {13: s"germ13", 21: s"germ21"}, ... 
// 7 identical fields } >>> TestDiff/Project2/InequalOrder <<< TestDiff/Project2/Inequal teststructs.GermBatch{ DirtyGerms: map[int32][]*testprotos.Germ{ + 17: {s"germ1"}, 18: Inverse(Sort, []*testprotos.Germ{ s"germ2", s"germ3", - s"germ4", }), }, CleanGerms: nil, GermMap: {13: s"germ13", 21: s"germ21"}, DishMap: map[int32]*teststructs.Dish{ 0: &{err: e"EOF"}, - 1: nil, + 1: &{err: e"unexpected EOF"}, 2: &{pb: s"dish"}, }, HasPreviousResult: true, DirtyID: 10, CleanID: 0, - GermStrain: 421, + GermStrain: 22, TotalDirtyGerms: 0, InfectedAt: s"2009-11-10 23:00:00 +0000 UTC", } >>> TestDiff/Project2/Inequal <<< TestDiff/Project3/Inequal teststructs.Dirt{ - table: &teststructs.MockTable{state: []string{"a", "c"}}, + table: &teststructs.MockTable{state: []string{"a", "b", "c"}}, ts: 12345, - Discord: 554, + Discord: 500, - Proto: testprotos.Dirt(Inverse(λ, s"blah")), + Proto: testprotos.Dirt(Inverse(λ, s"proto")), wizard: map[string]*testprotos.Wizard{ - "albus": s"dumbledore", - "harry": s"potter", + "harry": s"otter", }, sadistic: nil, lastTime: 54321, ... // 1 ignored field } >>> TestDiff/Project3/Inequal <<< TestDiff/Project4/Inequal teststructs.Cartel{ Headquarter: teststructs.Headquarter{ id: 5, location: "moon", subDivisions: []string{ - "alpha", "bravo", "charlie", }, incorporatedDate: s"0001-01-01 00:00:00 +0000 UTC", metaData: s"metadata", privateMessage: nil, publicMessage: []uint8{ 0x01, 0x02, - 0x03, + 0x04, - 0x04, + 0x03, 0x05, }, horseBack: "abcdef", rattle: "", ... // 5 identical fields }, source: "mars", creationDate: s"0001-01-01 00:00:00 +0000 UTC", boss: "al capone", lastCrimeDate: s"0001-01-01 00:00:00 +0000 UTC", poisons: []*teststructs.Poison{ &{ - poisonType: 1, + poisonType: 5, expiration: s"2009-11-10 23:00:00 +0000 UTC", manufacturer: "acme", potency: 0, }, - &{poisonType: 2, manufacturer: "acme2"}, }, } >>> TestDiff/Project4/Inequal dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/export.go0000644000000000000000000000224515024302467023545 0ustar rootroot// Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package cmp import ( "reflect" "unsafe" ) // retrieveUnexportedField uses unsafe to forcibly retrieve any field from // a struct such that the value has read-write permissions. // // The parent struct, v, must be addressable, while f must be a StructField // describing the field to retrieve. If addr is false, // then the returned value will be shallowed copied to be non-addressable. func retrieveUnexportedField(v reflect.Value, f reflect.StructField, addr bool) reflect.Value { ve := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem() if !addr { // A field is addressable if and only if the struct is addressable. // If the original parent value was not addressable, shallow copy the // value to make it non-addressable to avoid leaking an implementation // detail of how forcibly exporting a field works. if ve.Kind() == reflect.Interface && ve.IsNil() { return reflect.Zero(f.Type) } return reflect.ValueOf(ve.Interface()).Convert(f.Type) } return ve } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/report_reflect.go0000644000000000000000000003167215024302467025251 0ustar rootroot// Copyright 2019, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
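// The retrieveUnexportedField helper in export.go above is what allows the
// formatter in this file to print unexported struct fields (for example, the
// "private:" lines in the EmbeddedStruct golden output earlier). A minimal,
// user-facing sketch of the same capability, assuming a hypothetical `pair`
// type defined by the caller:
//
//	type pair struct{ x, y int } // unexported fields
//
//	a, b := pair{1, 2}, pair{1, 3}
//	// Without AllowUnexported (or an Exporter option), cmp panics on
//	// unexported fields; with it, the diff reports the differing y field.
//	fmt.Println(cmp.Diff(a, b, cmp.AllowUnexported(pair{})))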
package cmp import ( "bytes" "fmt" "reflect" "strconv" "strings" "unicode" "unicode/utf8" "github.com/google/go-cmp/cmp/internal/value" ) var ( anyType = reflect.TypeOf((*interface{})(nil)).Elem() stringType = reflect.TypeOf((*string)(nil)).Elem() bytesType = reflect.TypeOf((*[]byte)(nil)).Elem() byteType = reflect.TypeOf((*byte)(nil)).Elem() ) type formatValueOptions struct { // AvoidStringer controls whether to avoid calling custom stringer // methods like error.Error or fmt.Stringer.String. AvoidStringer bool // PrintAddresses controls whether to print the address of all pointers, // slice elements, and maps. PrintAddresses bool // QualifiedNames controls whether FormatType uses the fully qualified name // (including the full package path as opposed to just the package name). QualifiedNames bool // VerbosityLevel controls the amount of output to produce. // A higher value produces more output. A value of zero or lower produces // no output (represented using an ellipsis). // If LimitVerbosity is false, then the level is treated as infinite. VerbosityLevel int // LimitVerbosity specifies that formatting should respect VerbosityLevel. LimitVerbosity bool } // FormatType prints the type as if it were wrapping s. // This may return s as-is depending on the current type and TypeMode mode. func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode { // Check whether to emit the type or not. switch opts.TypeMode { case autoType: switch t.Kind() { case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map: if s.Equal(textNil) { return s } default: return s } if opts.DiffMode == diffIdentical { return s // elide type for identical nodes } case elideType: return s } // Determine the type label, applying special handling for unnamed types. typeName := value.TypeString(t, opts.QualifiedNames) if t.Name() == "" { // According to Go grammar, certain type literals contain symbols that // do not strongly bind to the next lexicographical token (e.g., *T). switch t.Kind() { case reflect.Chan, reflect.Func, reflect.Ptr: typeName = "(" + typeName + ")" } } return &textWrap{Prefix: typeName, Value: wrapParens(s)} } // wrapParens wraps s with a set of parenthesis, but avoids it if the // wrapped node itself is already surrounded by a pair of parenthesis or braces. // It handles unwrapping one level of pointer-reference nodes. func wrapParens(s textNode) textNode { var refNode *textWrap if s2, ok := s.(*textWrap); ok { // Unwrap a single pointer reference node. switch s2.Metadata.(type) { case leafReference, trunkReference, trunkReferences: refNode = s2 if s3, ok := refNode.Value.(*textWrap); ok { s2 = s3 } } // Already has delimiters that make parenthesis unnecessary. hasParens := strings.HasPrefix(s2.Prefix, "(") && strings.HasSuffix(s2.Suffix, ")") hasBraces := strings.HasPrefix(s2.Prefix, "{") && strings.HasSuffix(s2.Suffix, "}") if hasParens || hasBraces { return s } } if refNode != nil { refNode.Value = &textWrap{Prefix: "(", Value: refNode.Value, Suffix: ")"} return s } return &textWrap{Prefix: "(", Value: s, Suffix: ")"} } // FormatValue prints the reflect.Value, taking extra care to avoid descending // into pointers already in ptrs. As pointers are visited, ptrs is also updated. func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, ptrs *pointerReferences) (out textNode) { if !v.IsValid() { return nil } t := v.Type() // Check slice element for cycles. 
if parentKind == reflect.Slice { ptrRef, visited := ptrs.Push(v.Addr()) if visited { return makeLeafReference(ptrRef, false) } defer ptrs.Pop() defer func() { out = wrapTrunkReference(ptrRef, false, out) }() } // Check whether there is an Error or String method to call. if !opts.AvoidStringer && v.CanInterface() { // Avoid calling Error or String methods on nil receivers since many // implementations crash when doing so. if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() { var prefix, strVal string func() { // Swallow and ignore any panics from String or Error. defer func() { recover() }() switch v := v.Interface().(type) { case error: strVal = v.Error() prefix = "e" case fmt.Stringer: strVal = v.String() prefix = "s" } }() if prefix != "" { return opts.formatString(prefix, strVal) } } } // Check whether to explicitly wrap the result with the type. var skipType bool defer func() { if !skipType { out = opts.FormatType(t, out) } }() switch t.Kind() { case reflect.Bool: return textLine(fmt.Sprint(v.Bool())) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return textLine(fmt.Sprint(v.Int())) case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: return textLine(fmt.Sprint(v.Uint())) case reflect.Uint8: if parentKind == reflect.Slice || parentKind == reflect.Array { return textLine(formatHex(v.Uint())) } return textLine(fmt.Sprint(v.Uint())) case reflect.Uintptr: return textLine(formatHex(v.Uint())) case reflect.Float32, reflect.Float64: return textLine(fmt.Sprint(v.Float())) case reflect.Complex64, reflect.Complex128: return textLine(fmt.Sprint(v.Complex())) case reflect.String: return opts.formatString("", v.String()) case reflect.UnsafePointer, reflect.Chan, reflect.Func: return textLine(formatPointer(value.PointerOf(v), true)) case reflect.Struct: var list textList v := makeAddressable(v) // needed for retrieveUnexportedField maxLen := v.NumField() if opts.LimitVerbosity { maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... opts.VerbosityLevel-- } for i := 0; i < v.NumField(); i++ { vv := v.Field(i) if vv.IsZero() { continue // Elide fields with zero values } if len(list) == maxLen { list.AppendEllipsis(diffStats{}) break } sf := t.Field(i) if !isExported(sf.Name) { vv = retrieveUnexportedField(v, sf, true) } s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs) list = append(list, textRecord{Key: sf.Name, Value: s}) } return &textWrap{Prefix: "{", Value: list, Suffix: "}"} case reflect.Slice: if v.IsNil() { return textNil } // Check whether this is a []byte of text data. if t.Elem() == byteType { b := v.Bytes() isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) } if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 { out = opts.formatString("", string(b)) skipType = true return opts.FormatType(t, out) } } fallthrough case reflect.Array: maxLen := v.Len() if opts.LimitVerbosity { maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... 
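// As a worked instance of the formula above: a verbosity of 3 yields
// ((1<<3)>>1)<<2 = 16, so at most 16 elements are printed before the rest of
// the list is collapsed into an ellipsis (the concrete verbosity level is
// chosen elsewhere by the reporter).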
opts.VerbosityLevel-- } var list textList for i := 0; i < v.Len(); i++ { if len(list) == maxLen { list.AppendEllipsis(diffStats{}) break } s := opts.WithTypeMode(elideType).FormatValue(v.Index(i), t.Kind(), ptrs) list = append(list, textRecord{Value: s}) } out = &textWrap{Prefix: "{", Value: list, Suffix: "}"} if t.Kind() == reflect.Slice && opts.PrintAddresses { header := fmt.Sprintf("ptr:%v, len:%d, cap:%d", formatPointer(value.PointerOf(v), false), v.Len(), v.Cap()) out = &textWrap{Prefix: pointerDelimPrefix + header + pointerDelimSuffix, Value: out} } return out case reflect.Map: if v.IsNil() { return textNil } // Check pointer for cycles. ptrRef, visited := ptrs.Push(v) if visited { return makeLeafReference(ptrRef, opts.PrintAddresses) } defer ptrs.Pop() maxLen := v.Len() if opts.LimitVerbosity { maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... opts.VerbosityLevel-- } var list textList for _, k := range value.SortKeys(v.MapKeys()) { if len(list) == maxLen { list.AppendEllipsis(diffStats{}) break } sk := formatMapKey(k, false, ptrs) sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), t.Kind(), ptrs) list = append(list, textRecord{Key: sk, Value: sv}) } out = &textWrap{Prefix: "{", Value: list, Suffix: "}"} out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) return out case reflect.Ptr: if v.IsNil() { return textNil } // Check pointer for cycles. ptrRef, visited := ptrs.Push(v) if visited { out = makeLeafReference(ptrRef, opts.PrintAddresses) return &textWrap{Prefix: "&", Value: out} } defer ptrs.Pop() // Skip the name only if this is an unnamed pointer type. // Otherwise taking the address of a value does not reproduce // the named pointer type. if v.Type().Name() == "" { skipType = true // Let the underlying value print the type instead } out = opts.FormatValue(v.Elem(), t.Kind(), ptrs) out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) out = &textWrap{Prefix: "&", Value: out} return out case reflect.Interface: if v.IsNil() { return textNil } // Interfaces accept different concrete types, // so configure the underlying value to explicitly print the type. return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs) default: panic(fmt.Sprintf("%v kind not handled", v.Kind())) } } func (opts formatOptions) formatString(prefix, s string) textNode { maxLen := len(s) maxLines := strings.Count(s, "\n") + 1 if opts.LimitVerbosity { maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc... maxLines = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc... } // For multiline strings, use the triple-quote syntax, // but only use it when printing removed or inserted nodes since // we only want the extra verbosity for those cases. 
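// The triple-quote form is what produces the """-delimited blocks seen in the
// MultilineString golden output earlier in this archive. A minimal sketch that
// would exercise this path (want/got are illustrative values only):
//
//	want := "line one\nline two\nline three\nline four\n"
//	got := "line one\nline 2\nline three\nline four\n"
//	fmt.Println(cmp.Diff(want, got)) // sufficiently long multi-line strings may be shown in """ form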
lines := strings.Split(strings.TrimSuffix(s, "\n"), "\n") isTripleQuoted := len(lines) >= 4 && (opts.DiffMode == '-' || opts.DiffMode == '+') for i := 0; i < len(lines) && isTripleQuoted; i++ { lines[i] = strings.TrimPrefix(strings.TrimSuffix(lines[i], "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support isPrintable := func(r rune) bool { return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable } line := lines[i] isTripleQuoted = !strings.HasPrefix(strings.TrimPrefix(line, prefix), `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" && len(line) <= maxLen } if isTripleQuoted { var list textList list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true}) for i, line := range lines { if numElided := len(lines) - i; i == maxLines-1 && numElided > 1 { comment := commentString(fmt.Sprintf("%d elided lines", numElided)) list = append(list, textRecord{Diff: opts.DiffMode, Value: textEllipsis, ElideComma: true, Comment: comment}) break } list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(line), ElideComma: true}) } list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true}) return &textWrap{Prefix: "(", Value: list, Suffix: ")"} } // Format the string as a single-line quoted string. if len(s) > maxLen+len(textEllipsis) { return textLine(prefix + formatString(s[:maxLen]) + string(textEllipsis)) } return textLine(prefix + formatString(s)) } // formatMapKey formats v as if it were a map key. // The result is guaranteed to be a single line. func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string { var opts formatOptions opts.DiffMode = diffIdentical opts.TypeMode = elideType opts.PrintAddresses = disambiguate opts.AvoidStringer = disambiguate opts.QualifiedNames = disambiguate opts.VerbosityLevel = maxVerbosityPreset opts.LimitVerbosity = true s := opts.FormatValue(v, reflect.Map, ptrs).String() return strings.TrimSpace(s) } // formatString prints s as a double-quoted or backtick-quoted string. func formatString(s string) string { // Use quoted string if it the same length as a raw string literal. // Otherwise, attempt to use the raw string form. qs := strconv.Quote(s) if len(qs) == 1+len(s)+1 { return qs } // Disallow newlines to ensure output is a single line. // Only allow printable runes for readability purposes. rawInvalid := func(r rune) bool { return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t') } if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 { return "`" + s + "`" } return qs } // formatHex prints u as a hexadecimal integer in Go notation. func formatHex(u uint64) string { var f string switch { case u <= 0xff: f = "0x%02x" case u <= 0xffff: f = "0x%04x" case u <= 0xffffff: f = "0x%06x" case u <= 0xffffffff: f = "0x%08x" case u <= 0xffffffffff: f = "0x%010x" case u <= 0xffffffffffff: f = "0x%012x" case u <= 0xffffffffffffff: f = "0x%014x" case u <= 0xffffffffffffffff: f = "0x%016x" } return fmt.Sprintf(f, u) } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/options_test.go0000644000000000000000000001456015024302467024761 0ustar rootroot// Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
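// The table-driven test below passes deliberately malformed arguments to option
// constructors such as Comparer, Transformer, FilterPath, and FilterValues and
// checks that each panics with a descriptive message. A hypothetical sketch of
// the behavior being exercised (not part of the test itself):
//
//	// cmp.Comparer requires a function of the form func(T, T) bool; anything
//	// else panics with a message containing "invalid comparer function".
//	defer func() { fmt.Println("recovered:", recover()) }()
//	_ = cmp.Comparer(func(x, y int) int { return 0 }) // wrong return type, so this panics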
package cmp import ( "io" "reflect" "strings" "testing" ts "github.com/google/go-cmp/cmp/internal/teststructs" ) // Test that the creation of Option values with non-sensible inputs produces // a run-time panic with a decent error message func TestOptionPanic(t *testing.T) { type myBool bool tests := []struct { label string // Test description fnc interface{} // Option function to call args []interface{} // Arguments to pass in wantPanic string // Expected panic message }{{ label: "AllowUnexported", fnc: AllowUnexported, args: []interface{}{}, }, { label: "AllowUnexported", fnc: AllowUnexported, args: []interface{}{1}, wantPanic: "invalid struct type", }, { label: "AllowUnexported", fnc: AllowUnexported, args: []interface{}{ts.StructA{}}, }, { label: "AllowUnexported", fnc: AllowUnexported, args: []interface{}{ts.StructA{}, ts.StructB{}, ts.StructA{}}, }, { label: "AllowUnexported", fnc: AllowUnexported, args: []interface{}{ts.StructA{}, &ts.StructB{}, ts.StructA{}}, wantPanic: "invalid struct type", }, { label: "Comparer", fnc: Comparer, args: []interface{}{5}, wantPanic: "invalid comparer function", }, { label: "Comparer", fnc: Comparer, args: []interface{}{func(x, y interface{}) bool { return true }}, }, { label: "Comparer", fnc: Comparer, args: []interface{}{func(x, y io.Reader) bool { return true }}, }, { label: "Comparer", fnc: Comparer, args: []interface{}{func(x, y io.Reader) myBool { return true }}, wantPanic: "invalid comparer function", }, { label: "Comparer", fnc: Comparer, args: []interface{}{func(x string, y interface{}) bool { return true }}, wantPanic: "invalid comparer function", }, { label: "Comparer", fnc: Comparer, args: []interface{}{(func(int, int) bool)(nil)}, wantPanic: "invalid comparer function", }, { label: "Transformer", fnc: Transformer, args: []interface{}{"", 0}, wantPanic: "invalid transformer function", }, { label: "Transformer", fnc: Transformer, args: []interface{}{"", func(int) int { return 0 }}, }, { label: "Transformer", fnc: Transformer, args: []interface{}{"", func(bool) bool { return true }}, }, { label: "Transformer", fnc: Transformer, args: []interface{}{"", func(int) bool { return true }}, }, { label: "Transformer", fnc: Transformer, args: []interface{}{"", func(int, int) bool { return true }}, wantPanic: "invalid transformer function", }, { label: "Transformer", fnc: Transformer, args: []interface{}{"", (func(int) uint)(nil)}, wantPanic: "invalid transformer function", }, { label: "Transformer", fnc: Transformer, args: []interface{}{"Func", func(Path) Path { return nil }}, }, { label: "Transformer", fnc: Transformer, args: []interface{}{"世界", func(int) bool { return true }}, }, { label: "Transformer", fnc: Transformer, args: []interface{}{"/*", func(int) bool { return true }}, wantPanic: "invalid name", }, { label: "Transformer", fnc: Transformer, args: []interface{}{"_", func(int) bool { return true }}, }, { label: "FilterPath", fnc: FilterPath, args: []interface{}{(func(Path) bool)(nil), Ignore()}, wantPanic: "invalid path filter function", }, { label: "FilterPath", fnc: FilterPath, args: []interface{}{func(Path) bool { return true }, Ignore()}, }, { label: "FilterPath", fnc: FilterPath, args: []interface{}{func(Path) bool { return true }, Reporter(&defaultReporter{})}, wantPanic: "invalid option type", }, { label: "FilterPath", fnc: FilterPath, args: []interface{}{func(Path) bool { return true }, Options{Ignore(), Ignore()}}, }, { label: "FilterPath", fnc: FilterPath, args: []interface{}{func(Path) bool { return true }, Options{Ignore(), 
Reporter(&defaultReporter{})}}, wantPanic: "invalid option type", }, { label: "FilterValues", fnc: FilterValues, args: []interface{}{0, Ignore()}, wantPanic: "invalid values filter function", }, { label: "FilterValues", fnc: FilterValues, args: []interface{}{func(x, y int) bool { return true }, Ignore()}, }, { label: "FilterValues", fnc: FilterValues, args: []interface{}{func(x, y interface{}) bool { return true }, Ignore()}, }, { label: "FilterValues", fnc: FilterValues, args: []interface{}{func(x, y interface{}) myBool { return true }, Ignore()}, wantPanic: "invalid values filter function", }, { label: "FilterValues", fnc: FilterValues, args: []interface{}{func(x io.Reader, y interface{}) bool { return true }, Ignore()}, wantPanic: "invalid values filter function", }, { label: "FilterValues", fnc: FilterValues, args: []interface{}{(func(int, int) bool)(nil), Ignore()}, wantPanic: "invalid values filter function", }, { label: "FilterValues", fnc: FilterValues, args: []interface{}{func(int, int) bool { return true }, Reporter(&defaultReporter{})}, wantPanic: "invalid option type", }, { label: "FilterValues", fnc: FilterValues, args: []interface{}{func(int, int) bool { return true }, Options{Ignore(), Ignore()}}, }, { label: "FilterValues", fnc: FilterValues, args: []interface{}{func(int, int) bool { return true }, Options{Ignore(), Reporter(&defaultReporter{})}}, wantPanic: "invalid option type", }} for _, tt := range tests { t.Run(tt.label, func(t *testing.T) { var gotPanic string func() { defer func() { if ex := recover(); ex != nil { if s, ok := ex.(string); ok { gotPanic = s } else { panic(ex) } } }() var vargs []reflect.Value for _, arg := range tt.args { vargs = append(vargs, reflect.ValueOf(arg)) } reflect.ValueOf(tt.fnc).Call(vargs) }() if tt.wantPanic == "" { if gotPanic != "" { t.Fatalf("unexpected panic message: %s", gotPanic) } } else { if !strings.Contains(gotPanic, tt.wantPanic) { t.Fatalf("panic message:\ngot: %s\nwant: %s", gotPanic, tt.wantPanic) } } }) } } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/report_references.go0000644000000000000000000001707315024302467025745 0ustar rootroot// Copyright 2020, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package cmp import ( "fmt" "reflect" "strings" "github.com/google/go-cmp/cmp/internal/flags" "github.com/google/go-cmp/cmp/internal/value" ) const ( pointerDelimPrefix = "⟪" pointerDelimSuffix = "⟫" ) // formatPointer prints the address of the pointer. func formatPointer(p value.Pointer, withDelims bool) string { v := p.Uintptr() if flags.Deterministic { v = 0xdeadf00f // Only used for stable testing purposes } if withDelims { return pointerDelimPrefix + formatHex(uint64(v)) + pointerDelimSuffix } return formatHex(uint64(v)) } // pointerReferences is a stack of pointers visited so far. 
type pointerReferences [][2]value.Pointer func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) { if deref && vx.IsValid() { vx = vx.Addr() } if deref && vy.IsValid() { vy = vy.Addr() } switch d { case diffUnknown, diffIdentical: pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)} case diffRemoved: pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}} case diffInserted: pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)} } *ps = append(*ps, pp) return pp } func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) { p = value.PointerOf(v) for _, pp := range *ps { if p == pp[0] || p == pp[1] { return p, true } } *ps = append(*ps, [2]value.Pointer{p, p}) return p, false } func (ps *pointerReferences) Pop() { *ps = (*ps)[:len(*ps)-1] } // trunkReferences is metadata for a textNode indicating that the sub-tree // represents the value for either pointer in a pair of references. type trunkReferences struct{ pp [2]value.Pointer } // trunkReference is metadata for a textNode indicating that the sub-tree // represents the value for the given pointer reference. type trunkReference struct{ p value.Pointer } // leafReference is metadata for a textNode indicating that the value is // truncated as it refers to another part of the tree (i.e., a trunk). type leafReference struct{ p value.Pointer } func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode { switch { case pp[0].IsNil(): return &textWrap{Value: s, Metadata: trunkReference{pp[1]}} case pp[1].IsNil(): return &textWrap{Value: s, Metadata: trunkReference{pp[0]}} case pp[0] == pp[1]: return &textWrap{Value: s, Metadata: trunkReference{pp[0]}} default: return &textWrap{Value: s, Metadata: trunkReferences{pp}} } } func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode { var prefix string if printAddress { prefix = formatPointer(p, true) } return &textWrap{Prefix: prefix, Value: s, Metadata: trunkReference{p}} } func makeLeafReference(p value.Pointer, printAddress bool) textNode { out := &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"} var prefix string if printAddress { prefix = formatPointer(p, true) } return &textWrap{Prefix: prefix, Value: out, Metadata: leafReference{p}} } // resolveReferences walks the textNode tree searching for any leaf reference // metadata and resolves each against the corresponding trunk references. // Since pointer addresses in memory are not particularly readable to the user, // it replaces each pointer value with an arbitrary and unique reference ID. func resolveReferences(s textNode) { var walkNodes func(textNode, func(textNode)) walkNodes = func(s textNode, f func(textNode)) { f(s) switch s := s.(type) { case *textWrap: walkNodes(s.Value, f) case textList: for _, r := range s { walkNodes(r.Value, f) } } } // Collect all trunks and leaves with reference metadata. var trunks, leaves []*textWrap walkNodes(s, func(s textNode) { if s, ok := s.(*textWrap); ok { switch s.Metadata.(type) { case leafReference: leaves = append(leaves, s) case trunkReference, trunkReferences: trunks = append(trunks, s) } } }) // No leaf references to resolve. if len(leaves) == 0 { return } // Collect the set of all leaf references to resolve. leafPtrs := make(map[value.Pointer]bool) for _, leaf := range leaves { leafPtrs[leaf.Metadata.(leafReference).p] = true } // Collect the set of trunk pointers that are always paired together. // This allows us to assign a single ID to both pointers for brevity. 
// If a pointer in a pair ever occurs by itself or as a different pair, // then the pair is broken. pairedTrunkPtrs := make(map[value.Pointer]value.Pointer) unpair := func(p value.Pointer) { if !pairedTrunkPtrs[p].IsNil() { pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half } pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half } for _, trunk := range trunks { switch p := trunk.Metadata.(type) { case trunkReference: unpair(p.p) // standalone pointer cannot be part of a pair case trunkReferences: p0, ok0 := pairedTrunkPtrs[p.pp[0]] p1, ok1 := pairedTrunkPtrs[p.pp[1]] switch { case !ok0 && !ok1: // Register the newly seen pair. pairedTrunkPtrs[p.pp[0]] = p.pp[1] pairedTrunkPtrs[p.pp[1]] = p.pp[0] case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]: // Exact pair already seen; do nothing. default: // Pair conflicts with some other pair; break all pairs. unpair(p.pp[0]) unpair(p.pp[1]) } } } // Correlate each pointer referenced by leaves to a unique identifier, // and print the IDs for each trunk that matches those pointers. var nextID uint ptrIDs := make(map[value.Pointer]uint) newID := func() uint { id := nextID nextID++ return id } for _, trunk := range trunks { switch p := trunk.Metadata.(type) { case trunkReference: if print := leafPtrs[p.p]; print { id, ok := ptrIDs[p.p] if !ok { id = newID() ptrIDs[p.p] = id } trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id)) } case trunkReferences: print0 := leafPtrs[p.pp[0]] print1 := leafPtrs[p.pp[1]] if print0 || print1 { id0, ok0 := ptrIDs[p.pp[0]] id1, ok1 := ptrIDs[p.pp[1]] isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0] if isPair { var id uint assert(ok0 == ok1) // must be seen together or not at all if ok0 { assert(id0 == id1) // must have the same ID id = id0 } else { id = newID() ptrIDs[p.pp[0]] = id ptrIDs[p.pp[1]] = id } trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id)) } else { if print0 && !ok0 { id0 = newID() ptrIDs[p.pp[0]] = id0 } if print1 && !ok1 { id1 = newID() ptrIDs[p.pp[1]] = id1 } switch { case print0 && print1: trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1)) case print0: trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)) case print1: trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1)) } } } } } // Update all leaf references with the unique identifier. for _, leaf := range leaves { if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok { leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id)) } } } func formatReference(id uint) string { return fmt.Sprintf("ref#%d", id) } func updateReferencePrefix(prefix, ref string) string { if prefix == "" { return pointerDelimPrefix + ref + pointerDelimSuffix } suffix := strings.TrimPrefix(prefix, pointerDelimPrefix) return pointerDelimPrefix + ref + ": " + suffix } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/path.go0000644000000000000000000003161215024302467023160 0ustar rootroot// Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package cmp import ( "fmt" "reflect" "strings" "unicode" "unicode/utf8" "github.com/google/go-cmp/cmp/internal/value" ) // Path is a list of [PathStep] describing the sequence of operations to get // from some root type to the current position in the value tree. 
// The first Path element is always an operation-less [PathStep] that exists // simply to identify the initial type. // // When traversing structs with embedded structs, the embedded struct will // always be accessed as a field before traversing the fields of the // embedded struct themselves. That is, an exported field from the // embedded struct will never be accessed directly from the parent struct. type Path []PathStep // PathStep is a union-type for specific operations to traverse // a value's tree structure. Users of this package never need to implement // these types as values of this type will be returned by this package. // // Implementations of this interface: // - [StructField] // - [SliceIndex] // - [MapIndex] // - [Indirect] // - [TypeAssertion] // - [Transform] type PathStep interface { String() string // Type is the resulting type after performing the path step. Type() reflect.Type // Values is the resulting values after performing the path step. // The type of each valid value is guaranteed to be identical to Type. // // In some cases, one or both may be invalid or have restrictions: // - For StructField, both are not interface-able if the current field // is unexported and the struct type is not explicitly permitted by // an Exporter to traverse unexported fields. // - For SliceIndex, one may be invalid if an element is missing from // either the x or y slice. // - For MapIndex, one may be invalid if an entry is missing from // either the x or y map. // // The provided values must not be mutated. Values() (vx, vy reflect.Value) } var ( _ PathStep = StructField{} _ PathStep = SliceIndex{} _ PathStep = MapIndex{} _ PathStep = Indirect{} _ PathStep = TypeAssertion{} _ PathStep = Transform{} ) func (pa *Path) push(s PathStep) { *pa = append(*pa, s) } func (pa *Path) pop() { *pa = (*pa)[:len(*pa)-1] } // Last returns the last [PathStep] in the Path. // If the path is empty, this returns a non-nil [PathStep] // that reports a nil [PathStep.Type]. func (pa Path) Last() PathStep { return pa.Index(-1) } // Index returns the ith step in the Path and supports negative indexing. // A negative index starts counting from the tail of the Path such that -1 // refers to the last step, -2 refers to the second-to-last step, and so on. // If index is invalid, this returns a non-nil [PathStep] // that reports a nil [PathStep.Type]. func (pa Path) Index(i int) PathStep { if i < 0 { i = len(pa) + i } if i < 0 || i >= len(pa) { return pathStep{} } return pa[i] } // String returns the simplified path to a node. // The simplified path only contains struct field accesses. // // For example: // // MyMap.MySlices.MyField func (pa Path) String() string { var ss []string for _, s := range pa { if _, ok := s.(StructField); ok { ss = append(ss, s.String()) } } return strings.TrimPrefix(strings.Join(ss, ""), ".") } // GoString returns the path to a specific node using Go syntax. 
// // For example: // // (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField func (pa Path) GoString() string { var ssPre, ssPost []string var numIndirect int for i, s := range pa { var nextStep PathStep if i+1 < len(pa) { nextStep = pa[i+1] } switch s := s.(type) { case Indirect: numIndirect++ pPre, pPost := "(", ")" switch nextStep.(type) { case Indirect: continue // Next step is indirection, so let them batch up case StructField: numIndirect-- // Automatic indirection on struct fields case nil: pPre, pPost = "", "" // Last step; no need for parenthesis } if numIndirect > 0 { ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect)) ssPost = append(ssPost, pPost) } numIndirect = 0 continue case Transform: ssPre = append(ssPre, s.trans.name+"(") ssPost = append(ssPost, ")") continue } ssPost = append(ssPost, s.String()) } for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 { ssPre[i], ssPre[j] = ssPre[j], ssPre[i] } return strings.Join(ssPre, "") + strings.Join(ssPost, "") } type pathStep struct { typ reflect.Type vx, vy reflect.Value } func (ps pathStep) Type() reflect.Type { return ps.typ } func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy } func (ps pathStep) String() string { if ps.typ == nil { return "" } s := value.TypeString(ps.typ, false) if s == "" || strings.ContainsAny(s, "{}\n") { return "root" // Type too simple or complex to print } return fmt.Sprintf("{%s}", s) } // StructField is a [PathStep] that represents a struct field access // on a field called [StructField.Name]. type StructField struct{ *structField } type structField struct { pathStep name string idx int // These fields are used for forcibly accessing an unexported field. // pvx, pvy, and field are only valid if unexported is true. unexported bool mayForce bool // Forcibly allow visibility paddr bool // Was parent addressable? pvx, pvy reflect.Value // Parent values (always addressable) field reflect.StructField // Field information } func (sf StructField) Type() reflect.Type { return sf.typ } func (sf StructField) Values() (vx, vy reflect.Value) { if !sf.unexported { return sf.vx, sf.vy // CanInterface reports true } // Forcibly obtain read-write access to an unexported struct field. if sf.mayForce { vx = retrieveUnexportedField(sf.pvx, sf.field, sf.paddr) vy = retrieveUnexportedField(sf.pvy, sf.field, sf.paddr) return vx, vy // CanInterface reports true } return sf.vx, sf.vy // CanInterface reports false } func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) } // Name is the field name. func (sf StructField) Name() string { return sf.name } // Index is the index of the field in the parent struct type. // See [reflect.Type.Field]. func (sf StructField) Index() int { return sf.idx } // SliceIndex is a [PathStep] that represents an index operation on // a slice or array at some index [SliceIndex.Key]. type SliceIndex struct{ *sliceIndex } type sliceIndex struct { pathStep xkey, ykey int isSlice bool // False for reflect.Array } func (si SliceIndex) Type() reflect.Type { return si.typ } func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy } func (si SliceIndex) String() string { switch { case si.xkey == si.ykey: return fmt.Sprintf("[%d]", si.xkey) case si.ykey == -1: // [5->?] 
means "I don't know where X[5] went" return fmt.Sprintf("[%d->?]", si.xkey) case si.xkey == -1: // [?->3] means "I don't know where Y[3] came from" return fmt.Sprintf("[?->%d]", si.ykey) default: // [5->3] means "X[5] moved to Y[3]" return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey) } } // Key is the index key; it may return -1 if in a split state func (si SliceIndex) Key() int { if si.xkey != si.ykey { return -1 } return si.xkey } // SplitKeys are the indexes for indexing into slices in the // x and y values, respectively. These indexes may differ due to the // insertion or removal of an element in one of the slices, causing // all of the indexes to be shifted. If an index is -1, then that // indicates that the element does not exist in the associated slice. // // [SliceIndex.Key] is guaranteed to return -1 if and only if the indexes // returned by SplitKeys are not the same. SplitKeys will never return -1 for // both indexes. func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey } // MapIndex is a [PathStep] that represents an index operation on a map at some index Key. type MapIndex struct{ *mapIndex } type mapIndex struct { pathStep key reflect.Value } func (mi MapIndex) Type() reflect.Type { return mi.typ } func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy } func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) } // Key is the value of the map key. func (mi MapIndex) Key() reflect.Value { return mi.key } // Indirect is a [PathStep] that represents pointer indirection on the parent type. type Indirect struct{ *indirect } type indirect struct { pathStep } func (in Indirect) Type() reflect.Type { return in.typ } func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy } func (in Indirect) String() string { return "*" } // TypeAssertion is a [PathStep] that represents a type assertion on an interface. type TypeAssertion struct{ *typeAssertion } type typeAssertion struct { pathStep } func (ta TypeAssertion) Type() reflect.Type { return ta.typ } func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy } func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) } // Transform is a [PathStep] that represents a transformation // from the parent type to the current type. type Transform struct{ *transform } type transform struct { pathStep trans *transformer } func (tf Transform) Type() reflect.Type { return tf.typ } func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy } func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) } // Name is the name of the [Transformer]. func (tf Transform) Name() string { return tf.trans.name } // Func is the function pointer to the transformer function. func (tf Transform) Func() reflect.Value { return tf.trans.fnc } // Option returns the originally constructed [Transformer] option. // The == operator can be used to detect the exact option used. func (tf Transform) Option() Option { return tf.trans } // pointerPath represents a dual-stack of pointers encountered when // recursively traversing the x and y values. This data structure supports // detection of cycles and determining whether the cycles are equal. // In Go, cycles can occur via pointers, slices, and maps. // // The pointerPath uses a map to represent a stack; where descension into a // pointer pushes the address onto the stack, and ascension from a pointer // pops the address from the stack. 
Thus, when traversing into a pointer from // reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles // by checking whether the pointer has already been visited. The cycle detection // uses a separate stack for the x and y values. // // If a cycle is detected we need to determine whether the two pointers // should be considered equal. The definition of equality chosen by Equal // requires two graphs to have the same structure. To determine this, both the // x and y values must have a cycle where the previous pointers were also // encountered together as a pair. // // Semantically, this is equivalent to augmenting Indirect, SliceIndex, and // MapIndex with pointer information for the x and y values. // Suppose px and py are two pointers to compare, we then search the // Path for whether px was ever encountered in the Path history of x, and // similarly so with py. If either side has a cycle, the comparison is only // equal if both px and py have a cycle resulting from the same PathStep. // // Using a map as a stack is more performant as we can perform cycle detection // in O(1) instead of O(N) where N is len(Path). type pointerPath struct { // mx is keyed by x pointers, where the value is the associated y pointer. mx map[value.Pointer]value.Pointer // my is keyed by y pointers, where the value is the associated x pointer. my map[value.Pointer]value.Pointer } func (p *pointerPath) Init() { p.mx = make(map[value.Pointer]value.Pointer) p.my = make(map[value.Pointer]value.Pointer) } // Push indicates intent to descend into pointers vx and vy where // visited reports whether either has been seen before. If visited before, // equal reports whether both pointers were encountered together. // Pop must be called if and only if the pointers were never visited. // // The pointers vx and vy must be a reflect.Ptr, reflect.Slice, or reflect.Map // and be non-nil. func (p pointerPath) Push(vx, vy reflect.Value) (equal, visited bool) { px := value.PointerOf(vx) py := value.PointerOf(vy) _, ok1 := p.mx[px] _, ok2 := p.my[py] if ok1 || ok2 { equal = p.mx[px] == py && p.my[py] == px // Pointers paired together return equal, true } p.mx[px] = py p.my[py] = px return false, false } // Pop ascends from pointers vx and vy. func (p pointerPath) Pop(vx, vy reflect.Value) { delete(p.mx, value.PointerOf(vx)) delete(p.my, value.PointerOf(vy)) } // isExported reports whether the identifier is exported. func isExported(id string) bool { r, _ := utf8.DecodeRuneInString(id) return unicode.IsUpper(r) } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/cmpopts/0000755000000000000000000000000015024302467023357 5ustar rootrootdependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/cmpopts/sort.go0000644000000000000000000001405315024302467024700 0ustar rootroot// Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package cmpopts import ( "fmt" "reflect" "sort" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/internal/function" ) // SortSlices returns a [cmp.Transformer] option that sorts all []V. // The lessOrCompareFunc function must be either // a less function of the form "func(T, T) bool" or // a compare function of the format "func(T, T) int" // which is used to sort any slice with element type V that is assignable to T. 
// // A less function must be: // - Deterministic: less(x, y) == less(x, y) // - Irreflexive: !less(x, x) // - Transitive: if !less(x, y) and !less(y, z), then !less(x, z) // // A compare function must be: // - Deterministic: compare(x, y) == compare(x, y) // - Irreflexive: compare(x, x) == 0 // - Transitive: if compare(x, y) < 0 and compare(y, z) < 0, then compare(x, z) < 0 // // The function does not have to be "total". That is, if x != y, but // less or compare report no inequality between them, their relative order is maintained. // // SortSlices can be used in conjunction with [EquateEmpty]. func SortSlices(lessOrCompareFunc interface{}) cmp.Option { vf := reflect.ValueOf(lessOrCompareFunc) if (!function.IsType(vf.Type(), function.Less) && !function.IsType(vf.Type(), function.Compare)) || vf.IsNil() { panic(fmt.Sprintf("invalid less or compare function: %T", lessOrCompareFunc)) } ss := sliceSorter{vf.Type().In(0), vf} return cmp.FilterValues(ss.filter, cmp.Transformer("cmpopts.SortSlices", ss.sort)) } type sliceSorter struct { in reflect.Type // T fnc reflect.Value // func(T, T) bool } func (ss sliceSorter) filter(x, y interface{}) bool { vx, vy := reflect.ValueOf(x), reflect.ValueOf(y) if !(x != nil && y != nil && vx.Type() == vy.Type()) || !(vx.Kind() == reflect.Slice && vx.Type().Elem().AssignableTo(ss.in)) || (vx.Len() <= 1 && vy.Len() <= 1) { return false } // Check whether the slices are already sorted to avoid an infinite // recursion cycle applying the same transform to itself. ok1 := sort.SliceIsSorted(x, func(i, j int) bool { return ss.less(vx, i, j) }) ok2 := sort.SliceIsSorted(y, func(i, j int) bool { return ss.less(vy, i, j) }) return !ok1 || !ok2 } func (ss sliceSorter) sort(x interface{}) interface{} { src := reflect.ValueOf(x) dst := reflect.MakeSlice(src.Type(), src.Len(), src.Len()) for i := 0; i < src.Len(); i++ { dst.Index(i).Set(src.Index(i)) } sort.SliceStable(dst.Interface(), func(i, j int) bool { return ss.less(dst, i, j) }) ss.checkSort(dst) return dst.Interface() } func (ss sliceSorter) checkSort(v reflect.Value) { start := -1 // Start of a sequence of equal elements. for i := 1; i < v.Len(); i++ { if ss.less(v, i-1, i) { // Check that first and last elements in v[start:i] are equal. if start >= 0 && (ss.less(v, start, i-1) || ss.less(v, i-1, start)) { panic(fmt.Sprintf("incomparable values detected: want equal elements: %v", v.Slice(start, i))) } start = -1 } else if start == -1 { start = i - 1 } } } func (ss sliceSorter) less(v reflect.Value, i, j int) bool { vx, vy := v.Index(i), v.Index(j) vo := ss.fnc.Call([]reflect.Value{vx, vy})[0] if vo.Kind() == reflect.Bool { return vo.Bool() } else { return vo.Int() < 0 } } // SortMaps returns a [cmp.Transformer] option that flattens map[K]V types to be // a sorted []struct{K, V}. The lessOrCompareFunc function must be either // a less function of the form "func(T, T) bool" or // a compare function of the format "func(T, T) int" // which is used to sort any map with key K that is assignable to T. // // Flattening the map into a slice has the property that [cmp.Equal] is able to // use [cmp.Comparer] options on K or the K.Equal method if it exists.
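//
// A short sketch of that property (illustrative only; the variable names and
// values are assumptions drawn from this package's own tests, not part of the
// original documentation):
//
//	birthday := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
//	x := map[time.Time]string{birthday: "party"}
//	y := map[time.Time]string{birthday.In(time.Local): "party"}
//	// Plain map comparison fails: the keys differ under Go map key equality
//	// because their locations differ. After SortMaps flattens both maps,
//	// the keys are compared with time.Time.Equal and match.
//	equal := cmp.Equal(x, y, SortMaps(func(p, q time.Time) bool { return p.Before(q) }))
//	// equal == true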
// // A less function must be: // - Deterministic: less(x, y) == less(x, y) // - Irreflexive: !less(x, x) // - Transitive: if !less(x, y) and !less(y, z), then !less(x, z) // - Total: if x != y, then either less(x, y) or less(y, x) // // A compare function must be: // - Deterministic: compare(x, y) == compare(x, y) // - Irreflexive: compare(x, x) == 0 // - Transitive: if compare(x, y) < 0 and compare(y, z) < 0, then compare(x, z) < 0 // - Total: if x != y, then compare(x, y) != 0 // // SortMaps can be used in conjunction with [EquateEmpty]. func SortMaps(lessOrCompareFunc interface{}) cmp.Option { vf := reflect.ValueOf(lessOrCompareFunc) if (!function.IsType(vf.Type(), function.Less) && !function.IsType(vf.Type(), function.Compare)) || vf.IsNil() { panic(fmt.Sprintf("invalid less or compare function: %T", lessOrCompareFunc)) } ms := mapSorter{vf.Type().In(0), vf} return cmp.FilterValues(ms.filter, cmp.Transformer("cmpopts.SortMaps", ms.sort)) } type mapSorter struct { in reflect.Type // T fnc reflect.Value // func(T, T) bool } func (ms mapSorter) filter(x, y interface{}) bool { vx, vy := reflect.ValueOf(x), reflect.ValueOf(y) return (x != nil && y != nil && vx.Type() == vy.Type()) && (vx.Kind() == reflect.Map && vx.Type().Key().AssignableTo(ms.in)) && (vx.Len() != 0 || vy.Len() != 0) } func (ms mapSorter) sort(x interface{}) interface{} { src := reflect.ValueOf(x) outType := reflect.StructOf([]reflect.StructField{ {Name: "K", Type: src.Type().Key()}, {Name: "V", Type: src.Type().Elem()}, }) dst := reflect.MakeSlice(reflect.SliceOf(outType), src.Len(), src.Len()) for i, k := range src.MapKeys() { v := reflect.New(outType).Elem() v.Field(0).Set(k) v.Field(1).Set(src.MapIndex(k)) dst.Index(i).Set(v) } sort.Slice(dst.Interface(), func(i, j int) bool { return ms.less(dst, i, j) }) ms.checkSort(dst) return dst.Interface() } func (ms mapSorter) checkSort(v reflect.Value) { for i := 1; i < v.Len(); i++ { if !ms.less(v, i-1, i) { panic(fmt.Sprintf("partial order detected: want %v < %v", v.Index(i-1), v.Index(i))) } } } func (ms mapSorter) less(v reflect.Value, i, j int) bool { vx, vy := v.Index(i).Field(0), v.Index(j).Field(0) vo := ms.fnc.Call([]reflect.Value{vx, vy})[0] if vo.Kind() == reflect.Bool { return vo.Bool() } else { return vo.Int() < 0 } } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/cmpopts/xform.go0000644000000000000000000000213115024302467025036 0ustar rootroot// Copyright 2018, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package cmpopts import ( "github.com/google/go-cmp/cmp" ) type xformFilter struct{ xform cmp.Option } func (xf xformFilter) filter(p cmp.Path) bool { for _, ps := range p { if t, ok := ps.(cmp.Transform); ok && t.Option() == xf.xform { return false } } return true } // AcyclicTransformer returns a [cmp.Transformer] with a filter applied that ensures // that the transformer cannot be recursively applied upon its own output. // // An example use case is a transformer that splits a string by lines: // // AcyclicTransformer("SplitLines", func(s string) []string{ // return strings.Split(s, "\n") // }) // // Had this been an unfiltered [cmp.Transformer] instead, this would result in an // infinite cycle converting a string to []string to [][]string and so on. 
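//
// A brief usage sketch (illustrative; the input strings are hypothetical):
// with the transformer above, cmp compares the strings line by line instead
// of as opaque values:
//
//	opt := AcyclicTransformer("SplitLines", func(s string) []string {
//		return strings.Split(s, "\n")
//	})
//	diff := cmp.Diff("alpha\nbravo", "alpha\ncharlie", opt)
//	// diff reports a difference only on the second line.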
func AcyclicTransformer(name string, xformFunc interface{}) cmp.Option { xf := xformFilter{cmp.Transformer(name, xformFunc)} return cmp.FilterPath(xf.filter, xf.xform) } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/cmpopts/util_test.go0000644000000000000000000013634515024302467025736 0ustar rootroot// Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package cmpopts import ( "bytes" "errors" "fmt" "io" "math" "net/netip" "reflect" "strings" "sync" "testing" "time" "github.com/google/go-cmp/cmp" ) type ( MyInt int MyInts []int MyFloat float32 MyString string MyTime struct{ time.Time } MyStruct struct { A, B []int C, D map[time.Time]string } Foo1 struct{ Alpha, Bravo, Charlie int } Foo2 struct{ *Foo1 } Foo3 struct{ *Foo2 } Bar1 struct{ Foo3 } Bar2 struct { Bar1 *Foo3 Bravo float32 } Bar3 struct { Bar1 Bravo *Bar2 Delta struct{ Echo Foo1 } *Foo3 Alpha string } privateStruct struct{ Public, private int } PublicStruct struct{ Public, private int } ParentStruct struct { *privateStruct *PublicStruct Public int private int } Everything struct { MyInt MyFloat MyTime MyStruct Bar3 ParentStruct } EmptyInterface interface{} ) func TestOptions(t *testing.T) { createBar3X := func() *Bar3 { return &Bar3{ Bar1: Bar1{Foo3{&Foo2{&Foo1{Bravo: 2}}}}, Bravo: &Bar2{ Bar1: Bar1{Foo3{&Foo2{&Foo1{Charlie: 7}}}}, Foo3: &Foo3{&Foo2{&Foo1{Bravo: 5}}}, Bravo: 4, }, Delta: struct{ Echo Foo1 }{Foo1{Charlie: 3}}, Foo3: &Foo3{&Foo2{&Foo1{Alpha: 1}}}, Alpha: "alpha", } } createBar3Y := func() *Bar3 { return &Bar3{ Bar1: Bar1{Foo3{&Foo2{&Foo1{Bravo: 3}}}}, Bravo: &Bar2{ Bar1: Bar1{Foo3{&Foo2{&Foo1{Charlie: 8}}}}, Foo3: &Foo3{&Foo2{&Foo1{Bravo: 6}}}, Bravo: 5, }, Delta: struct{ Echo Foo1 }{Foo1{Charlie: 4}}, Foo3: &Foo3{&Foo2{&Foo1{Alpha: 2}}}, Alpha: "ALPHA", } } tests := []struct { label string // Test name x, y interface{} // Input values to compare opts []cmp.Option // Input options wantEqual bool // Whether the inputs are equal wantPanic bool // Whether Equal should panic reason string // The reason for the expected outcome }{{ label: "EquateEmpty", x: []int{}, y: []int(nil), wantEqual: false, reason: "not equal because empty non-nil and nil slice differ", }, { label: "EquateEmpty", x: []int{}, y: []int(nil), opts: []cmp.Option{EquateEmpty()}, wantEqual: true, reason: "equal because EquateEmpty equates empty slices", }, { label: "SortSlices", x: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, y: []int{1, 0, 5, 2, 8, 9, 4, 3, 6, 7}, wantEqual: false, reason: "not equal because element order differs", }, { label: "SortSlices", x: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, y: []int{1, 0, 5, 2, 8, 9, 4, 3, 6, 7}, opts: []cmp.Option{SortSlices(func(x, y int) bool { return x < y })}, wantEqual: true, reason: "equal because SortSlices sorts the slices", }, { label: "SortSlices", x: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, y: []int{1, 0, 5, 2, 8, 9, 4, 3, 6, 7}, opts: []cmp.Option{SortSlices(func(x, y int) int { // TODO(Go1.22): Use cmp.Compare. 
switch { case x < y: return -1 case x > y: return +1 default: return 0 } })}, wantEqual: true, reason: "equal because SortSlices sorts the slices", }, { label: "SortSlices", x: []MyInt{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, y: []MyInt{1, 0, 5, 2, 8, 9, 4, 3, 6, 7}, opts: []cmp.Option{SortSlices(func(x, y int) bool { return x < y })}, wantEqual: false, reason: "not equal because MyInt is not the same type as int", }, { label: "SortSlices", x: []float64{0, 1, 1, 2, 2, 2}, y: []float64{2, 0, 2, 1, 2, 1}, opts: []cmp.Option{SortSlices(func(x, y float64) bool { return x < y })}, wantEqual: true, reason: "equal even when sorted with duplicate elements", }, { label: "SortSlices", x: []float64{0, 1, 1, 2, 2, 2, math.NaN(), 3, 3, 3, 3, 4, 4, 4, 4}, y: []float64{2, 0, 4, 4, 3, math.NaN(), 4, 1, 3, 2, 3, 3, 4, 1, 2}, opts: []cmp.Option{SortSlices(func(x, y float64) bool { return x < y })}, wantPanic: true, reason: "panics because SortSlices used with non-transitive less function", }, { label: "SortSlices", x: []float64{0, 1, 1, 2, 2, 2, math.NaN(), 3, 3, 3, 3, 4, 4, 4, 4}, y: []float64{2, 0, 4, 4, 3, math.NaN(), 4, 1, 3, 2, 3, 3, 4, 1, 2}, opts: []cmp.Option{SortSlices(func(x, y float64) bool { return (!math.IsNaN(x) && math.IsNaN(y)) || x < y })}, wantEqual: false, reason: "no panics because SortSlices used with valid less function; not equal because NaN != NaN", }, { label: "SortSlices+EquateNaNs", x: []float64{0, 1, 1, 2, 2, 2, math.NaN(), 3, 3, 3, math.NaN(), 3, 4, 4, 4, 4}, y: []float64{2, 0, 4, 4, 3, math.NaN(), 4, 1, 3, 2, 3, 3, 4, 1, math.NaN(), 2}, opts: []cmp.Option{ EquateNaNs(), SortSlices(func(x, y float64) bool { return (!math.IsNaN(x) && math.IsNaN(y)) || x < y }), }, wantEqual: true, reason: "no panics because SortSlices used with valid less function; equal because EquateNaNs is used", }, { label: "SortMaps", x: map[time.Time]string{ time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC): "0th birthday", time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC): "1st birthday", time.Date(2011, time.November, 10, 23, 0, 0, 0, time.UTC): "2nd birthday", }, y: map[time.Time]string{ time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local): "0th birthday", time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local): "1st birthday", time.Date(2011, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local): "2nd birthday", }, wantEqual: false, reason: "not equal because timezones differ", }, { label: "SortMaps", x: map[time.Time]string{ time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC): "0th birthday", time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC): "1st birthday", time.Date(2011, time.November, 10, 23, 0, 0, 0, time.UTC): "2nd birthday", }, y: map[time.Time]string{ time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local): "0th birthday", time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local): "1st birthday", time.Date(2011, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local): "2nd birthday", }, opts: []cmp.Option{SortMaps(func(x, y time.Time) bool { return x.Before(y) })}, wantEqual: true, reason: "equal because SortMaps flattens to a slice where Time.Equal can be used", }, { label: "SortMaps", x: map[time.Time]string{ time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC): "0th birthday", time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC): "1st birthday", time.Date(2011, time.November, 10, 23, 0, 0, 0, time.UTC): "2nd birthday", }, y: map[time.Time]string{ time.Date(2009, time.November, 10, 23, 0, 0,
0, time.UTC).In(time.Local): "0th birthday", time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local): "1st birthday", time.Date(2011, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local): "2nd birthday", }, opts: []cmp.Option{SortMaps(func(x, y time.Time) int { return time.Time.Compare(x, y) })}, wantEqual: true, reason: "equal because SortMaps flattens to a slice where Time.Equal can be used", }, { label: "SortMaps", x: map[MyTime]string{ {time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)}: "0th birthday", {time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)}: "1st birthday", {time.Date(2011, time.November, 10, 23, 0, 0, 0, time.UTC)}: "2nd birthday", }, y: map[MyTime]string{ {time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local)}: "0th birthday", {time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local)}: "1st birthday", {time.Date(2011, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local)}: "2nd birthday", }, opts: []cmp.Option{SortMaps(func(x, y time.Time) bool { return x.Before(y) })}, wantEqual: false, reason: "not equal because MyTime is not assignable to time.Time", }, { label: "SortMaps", x: map[int]string{-3: "", -2: "", -1: "", 0: "", 1: "", 2: "", 3: ""}, // => {0, 1, 2, 3, -1, -2, -3}, y: map[int]string{300: "", 200: "", 100: "", 0: "", 1: "", 2: "", 3: ""}, // => {0, 1, 2, 3, 100, 200, 300}, opts: []cmp.Option{SortMaps(func(a, b int) bool { if -10 < a && a <= 0 { a *= -100 } if -10 < b && b <= 0 { b *= -100 } return a < b })}, wantEqual: false, reason: "not equal because values differ even though SortMap provides valid ordering", }, { label: "SortMaps", x: map[int]string{-3: "", -2: "", -1: "", 0: "", 1: "", 2: "", 3: ""}, // => {0, 1, 2, 3, -1, -2, -3}, y: map[int]string{300: "", 200: "", 100: "", 0: "", 1: "", 2: "", 3: ""}, // => {0, 1, 2, 3, 100, 200, 300}, opts: []cmp.Option{ SortMaps(func(x, y int) bool { if -10 < x && x <= 0 { x *= -100 } if -10 < y && y <= 0 { y *= -100 } return x < y }), cmp.Comparer(func(x, y int) bool { if -10 < x && x <= 0 { x *= -100 } if -10 < y && y <= 0 { y *= -100 } return x == y }), }, wantEqual: true, reason: "equal because Comparer used to equate differences", }, { label: "SortMaps", x: map[int]string{-3: "", -2: "", -1: "", 0: "", 1: "", 2: "", 3: ""}, y: map[int]string{}, opts: []cmp.Option{SortMaps(func(x, y int) bool { return x < y && x >= 0 && y >= 0 })}, wantPanic: true, reason: "panics because SortMaps used with non-transitive less function", }, { label: "SortMaps", x: map[int]string{-3: "", -2: "", -1: "", 0: "", 1: "", 2: "", 3: ""}, y: map[int]string{}, opts: []cmp.Option{SortMaps(func(x, y int) bool { return math.Abs(float64(x)) < math.Abs(float64(y)) })}, wantPanic: true, reason: "panics because SortMaps used with partial less function", }, { label: "EquateEmpty+SortSlices+SortMaps", x: MyStruct{ A: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, C: map[time.Time]string{ time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC): "0th birthday", time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC): "1st birthday", }, D: map[time.Time]string{}, }, y: MyStruct{ A: []int{1, 0, 5, 2, 8, 9, 4, 3, 6, 7}, B: []int{}, C: map[time.Time]string{ time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local): "0th birthday", time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local): "1st birthday", }, }, opts: []cmp.Option{ EquateEmpty(), SortSlices(func(x, y int) bool { return x < y }), SortMaps(func(x, y time.Time) bool { return x.Before(y) }), }, 
wantEqual: true, reason: "no panics because EquateEmpty should compose with the sort options", }, { label: "EquateApprox", x: 3.09, y: 3.10, wantEqual: false, reason: "not equal because floats do not exactly matches", }, { label: "EquateApprox", x: 3.09, y: 3.10, opts: []cmp.Option{EquateApprox(0, 0)}, wantEqual: false, reason: "not equal because EquateApprox(0 ,0) is equivalent to using ==", }, { label: "EquateApprox", x: 3.09, y: 3.10, opts: []cmp.Option{EquateApprox(0.003, 0.009)}, wantEqual: false, reason: "not equal because EquateApprox is too strict", }, { label: "EquateApprox", x: 3.09, y: 3.10, opts: []cmp.Option{EquateApprox(0, 0.011)}, wantEqual: true, reason: "equal because margin is loose enough to match", }, { label: "EquateApprox", x: 3.09, y: 3.10, opts: []cmp.Option{EquateApprox(0.004, 0)}, wantEqual: true, reason: "equal because fraction is loose enough to match", }, { label: "EquateApprox", x: 3.09, y: 3.10, opts: []cmp.Option{EquateApprox(0.004, 0.011)}, wantEqual: true, reason: "equal because both the margin and fraction are loose enough to match", }, { label: "EquateApprox", x: float32(3.09), y: float64(3.10), opts: []cmp.Option{EquateApprox(0.004, 0)}, wantEqual: false, reason: "not equal because the types differ", }, { label: "EquateApprox", x: float32(3.09), y: float32(3.10), opts: []cmp.Option{EquateApprox(0.004, 0)}, wantEqual: true, reason: "equal because EquateApprox also applies on float32s", }, { label: "EquateApprox", x: []float64{math.Inf(+1), math.Inf(-1)}, y: []float64{math.Inf(+1), math.Inf(-1)}, opts: []cmp.Option{EquateApprox(0, 1)}, wantEqual: true, reason: "equal because we fall back on == which matches Inf (EquateApprox does not apply on Inf) ", }, { label: "EquateApprox", x: []float64{math.Inf(+1), -1e100}, y: []float64{+1e100, math.Inf(-1)}, opts: []cmp.Option{EquateApprox(0, 1)}, wantEqual: false, reason: "not equal because we fall back on == where Inf != 1e100 (EquateApprox does not apply on Inf)", }, { label: "EquateApprox", x: float64(+1e100), y: float64(-1e100), opts: []cmp.Option{EquateApprox(math.Inf(+1), 0)}, wantEqual: true, reason: "equal because infinite fraction matches everything", }, { label: "EquateApprox", x: float64(+1e100), y: float64(-1e100), opts: []cmp.Option{EquateApprox(0, math.Inf(+1))}, wantEqual: true, reason: "equal because infinite margin matches everything", }, { label: "EquateApprox", x: math.Pi, y: math.Pi, opts: []cmp.Option{EquateApprox(0, 0)}, wantEqual: true, reason: "equal because EquateApprox(0, 0) is equivalent to ==", }, { label: "EquateApprox", x: math.Pi, y: math.Nextafter(math.Pi, math.Inf(+1)), opts: []cmp.Option{EquateApprox(0, 0)}, wantEqual: false, reason: "not equal because EquateApprox(0, 0) is equivalent to ==", }, { label: "EquateNaNs", x: []float64{1.0, math.NaN(), math.E, -0.0, +0.0, math.Inf(+1), math.Inf(-1)}, y: []float64{1.0, math.NaN(), math.E, -0.0, +0.0, math.Inf(+1), math.Inf(-1)}, wantEqual: false, reason: "not equal because NaN != NaN", }, { label: "EquateNaNs", x: []float64{1.0, math.NaN(), math.E, -0.0, +0.0, math.Inf(+1), math.Inf(-1)}, y: []float64{1.0, math.NaN(), math.E, -0.0, +0.0, math.Inf(+1), math.Inf(-1)}, opts: []cmp.Option{EquateNaNs()}, wantEqual: true, reason: "equal because EquateNaNs allows NaN == NaN", }, { label: "EquateNaNs", x: []float32{1.0, float32(math.NaN()), math.E, -0.0, +0.0}, y: []float32{1.0, float32(math.NaN()), math.E, -0.0, +0.0}, opts: []cmp.Option{EquateNaNs()}, wantEqual: true, reason: "equal because EquateNaNs operates on float32", }, { label: 
"EquateApprox+EquateNaNs", x: []float64{1.0, math.NaN(), math.E, -0.0, +0.0, math.Inf(+1), math.Inf(-1), 1.01, 5001}, y: []float64{1.0, math.NaN(), math.E, -0.0, +0.0, math.Inf(+1), math.Inf(-1), 1.02, 5002}, opts: []cmp.Option{ EquateNaNs(), EquateApprox(0.01, 0), }, wantEqual: true, reason: "equal because EquateNaNs and EquateApprox compose together", }, { label: "EquateApprox+EquateNaNs", x: []MyFloat{1.0, MyFloat(math.NaN()), MyFloat(math.E), -0.0, +0.0, MyFloat(math.Inf(+1)), MyFloat(math.Inf(-1)), 1.01, 5001}, y: []MyFloat{1.0, MyFloat(math.NaN()), MyFloat(math.E), -0.0, +0.0, MyFloat(math.Inf(+1)), MyFloat(math.Inf(-1)), 1.02, 5002}, opts: []cmp.Option{ EquateNaNs(), EquateApprox(0.01, 0), }, wantEqual: false, reason: "not equal because EquateApprox and EquateNaNs do not apply on a named type", }, { label: "EquateApprox+EquateNaNs+Transform", x: []MyFloat{1.0, MyFloat(math.NaN()), MyFloat(math.E), -0.0, +0.0, MyFloat(math.Inf(+1)), MyFloat(math.Inf(-1)), 1.01, 5001}, y: []MyFloat{1.0, MyFloat(math.NaN()), MyFloat(math.E), -0.0, +0.0, MyFloat(math.Inf(+1)), MyFloat(math.Inf(-1)), 1.02, 5002}, opts: []cmp.Option{ cmp.Transformer("", func(x MyFloat) float64 { return float64(x) }), EquateNaNs(), EquateApprox(0.01, 0), }, wantEqual: true, reason: "equal because named type is transformed to float64", }, { label: "EquateApproxTime", x: time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), y: time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), opts: []cmp.Option{EquateApproxTime(0)}, wantEqual: true, reason: "equal because times are identical", }, { label: "EquateApproxTime", x: time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), y: time.Date(2009, 11, 10, 23, 0, 3, 0, time.UTC), opts: []cmp.Option{EquateApproxTime(3 * time.Second)}, wantEqual: true, reason: "equal because time is exactly at the allowed margin", }, { label: "EquateApproxTime", x: time.Date(2009, 11, 10, 23, 0, 3, 0, time.UTC), y: time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), opts: []cmp.Option{EquateApproxTime(3 * time.Second)}, wantEqual: true, reason: "equal because time is exactly at the allowed margin (negative)", }, { label: "EquateApproxTime", x: time.Date(2009, 11, 10, 23, 0, 3, 0, time.UTC), y: time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), opts: []cmp.Option{EquateApproxTime(3*time.Second - 1)}, wantEqual: false, reason: "not equal because time is outside allowed margin", }, { label: "EquateApproxTime", x: time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), y: time.Date(2009, 11, 10, 23, 0, 3, 0, time.UTC), opts: []cmp.Option{EquateApproxTime(3*time.Second - 1)}, wantEqual: false, reason: "not equal because time is outside allowed margin (negative)", }, { label: "EquateApproxTime", x: time.Time{}, y: time.Time{}, opts: []cmp.Option{EquateApproxTime(3 * time.Second)}, wantEqual: true, reason: "equal because both times are zero", }, { label: "EquateApproxTime", x: time.Time{}, y: time.Time{}.Add(1), opts: []cmp.Option{EquateApproxTime(3 * time.Second)}, wantEqual: false, reason: "not equal because zero time is always not equal not non-zero", }, { label: "EquateApproxTime", x: time.Time{}.Add(1), y: time.Time{}, opts: []cmp.Option{EquateApproxTime(3 * time.Second)}, wantEqual: false, reason: "not equal because zero time is always not equal not non-zero", }, { label: "EquateApproxTime", x: time.Date(2409, 11, 10, 23, 0, 0, 0, time.UTC), y: time.Date(2000, 11, 10, 23, 0, 3, 0, time.UTC), opts: []cmp.Option{EquateApproxTime(3 * time.Second)}, wantEqual: false, reason: "time difference overflows time.Duration", }, { label: 
"EquateErrors", x: nil, y: nil, opts: []cmp.Option{EquateErrors()}, wantEqual: true, reason: "nil values are equal", }, { label: "EquateErrors", x: errors.New("EOF"), y: io.EOF, opts: []cmp.Option{EquateErrors()}, wantEqual: false, reason: "user-defined EOF is not exactly equal", }, { label: "EquateErrors", x: fmt.Errorf("wrapped: %w", io.EOF), y: io.EOF, opts: []cmp.Option{EquateErrors()}, wantEqual: true, reason: "wrapped io.EOF is equal according to errors.Is", }, { label: "EquateErrors", x: fmt.Errorf("wrapped: %w", io.EOF), y: io.EOF, wantEqual: false, reason: "wrapped io.EOF is not equal without EquateErrors option", }, { label: "EquateErrors", x: io.EOF, y: io.EOF, opts: []cmp.Option{EquateErrors()}, wantEqual: true, reason: "sentinel errors are equal", }, { label: "EquateErrors", x: io.EOF, y: AnyError, opts: []cmp.Option{EquateErrors()}, wantEqual: true, reason: "AnyError is equal to any non-nil error", }, { label: "EquateErrors", x: io.EOF, y: AnyError, wantEqual: false, reason: "AnyError is not equal to any non-nil error without EquateErrors option", }, { label: "EquateErrors", x: nil, y: AnyError, opts: []cmp.Option{EquateErrors()}, wantEqual: false, reason: "AnyError is not equal to nil value", }, { label: "EquateErrors", x: nil, y: nil, opts: []cmp.Option{EquateErrors()}, wantEqual: true, reason: "nil values are equal", }, { label: "EquateErrors", x: errors.New("EOF"), y: io.EOF, opts: []cmp.Option{EquateErrors()}, wantEqual: false, reason: "user-defined EOF is not exactly equal", }, { label: "EquateErrors", x: fmt.Errorf("wrapped: %w", io.EOF), y: io.EOF, opts: []cmp.Option{EquateErrors()}, wantEqual: true, reason: "wrapped io.EOF is equal according to errors.Is", }, { label: "EquateErrors", x: fmt.Errorf("wrapped: %w", io.EOF), y: io.EOF, wantEqual: false, reason: "wrapped io.EOF is not equal without EquateErrors option", }, { label: "EquateErrors", x: io.EOF, y: io.EOF, opts: []cmp.Option{EquateErrors()}, wantEqual: true, reason: "sentinel errors are equal", }, { label: "EquateErrors", x: io.EOF, y: AnyError, opts: []cmp.Option{EquateErrors()}, wantEqual: true, reason: "AnyError is equal to any non-nil error", }, { label: "EquateErrors", x: io.EOF, y: AnyError, wantEqual: false, reason: "AnyError is not equal to any non-nil error without EquateErrors option", }, { label: "EquateErrors", x: nil, y: AnyError, opts: []cmp.Option{EquateErrors()}, wantEqual: false, reason: "AnyError is not equal to nil value", }, { label: "EquateErrors", x: struct{ E error }{nil}, y: struct{ E error }{nil}, opts: []cmp.Option{EquateErrors()}, wantEqual: true, reason: "nil values are equal", }, { label: "EquateErrors", x: struct{ E error }{errors.New("EOF")}, y: struct{ E error }{io.EOF}, opts: []cmp.Option{EquateErrors()}, wantEqual: false, reason: "user-defined EOF is not exactly equal", }, { label: "EquateErrors", x: struct{ E error }{fmt.Errorf("wrapped: %w", io.EOF)}, y: struct{ E error }{io.EOF}, opts: []cmp.Option{EquateErrors()}, wantEqual: true, reason: "wrapped io.EOF is equal according to errors.Is", }, { label: "EquateErrors", x: struct{ E error }{fmt.Errorf("wrapped: %w", io.EOF)}, y: struct{ E error }{io.EOF}, wantEqual: false, reason: "wrapped io.EOF is not equal without EquateErrors option", }, { label: "EquateErrors", x: struct{ E error }{io.EOF}, y: struct{ E error }{io.EOF}, opts: []cmp.Option{EquateErrors()}, wantEqual: true, reason: "sentinel errors are equal", }, { label: "EquateErrors", x: struct{ E error }{io.EOF}, y: struct{ E error }{AnyError}, opts: 
[]cmp.Option{EquateErrors()}, wantEqual: true, reason: "AnyError is equal to any non-nil error", }, { label: "EquateErrors", x: struct{ E error }{io.EOF}, y: struct{ E error }{AnyError}, wantEqual: false, reason: "AnyError is not equal to any non-nil error without EquateErrors option", }, { label: "EquateErrors", x: struct{ E error }{nil}, y: struct{ E error }{AnyError}, opts: []cmp.Option{EquateErrors()}, wantEqual: false, reason: "AnyError is not equal to nil value", }, { label: "EquateComparable", x: []struct{ P netip.Addr }{ {netip.AddrFrom4([4]byte{1, 2, 3, 4})}, {netip.AddrFrom4([4]byte{1, 2, 3, 5})}, {netip.AddrFrom4([4]byte{1, 2, 3, 6})}, }, y: []struct{ P netip.Addr }{ {netip.AddrFrom4([4]byte{1, 2, 3, 4})}, {netip.AddrFrom4([4]byte{1, 2, 3, 5})}, {netip.AddrFrom4([4]byte{1, 2, 3, 6})}, }, opts: []cmp.Option{EquateComparable(netip.Addr{})}, wantEqual: true, reason: "equal because all IP addresses are the same", }, { label: "EquateComparable", x: []struct{ P netip.Addr }{ {netip.AddrFrom4([4]byte{1, 2, 3, 4})}, {netip.AddrFrom4([4]byte{1, 2, 3, 5})}, {netip.AddrFrom4([4]byte{1, 2, 3, 6})}, }, y: []struct{ P netip.Addr }{ {netip.AddrFrom4([4]byte{1, 2, 3, 4})}, {netip.AddrFrom4([4]byte{1, 2, 3, 7})}, {netip.AddrFrom4([4]byte{1, 2, 3, 6})}, }, opts: []cmp.Option{EquateComparable(netip.Addr{})}, wantEqual: false, reason: "not equal because second IP address is different", }, { label: "IgnoreFields", x: Bar1{Foo3{&Foo2{&Foo1{Alpha: 5}}}}, y: Bar1{Foo3{&Foo2{&Foo1{Alpha: 6}}}}, wantEqual: false, reason: "not equal because values do not match in deeply embedded field", }, { label: "IgnoreFields", x: Bar1{Foo3{&Foo2{&Foo1{Alpha: 5}}}}, y: Bar1{Foo3{&Foo2{&Foo1{Alpha: 6}}}}, opts: []cmp.Option{IgnoreFields(Bar1{}, "Alpha")}, wantEqual: true, reason: "equal because IgnoreField ignores deeply embedded field: Alpha", }, { label: "IgnoreFields", x: Bar1{Foo3{&Foo2{&Foo1{Alpha: 5}}}}, y: Bar1{Foo3{&Foo2{&Foo1{Alpha: 6}}}}, opts: []cmp.Option{IgnoreFields(Bar1{}, "Foo1.Alpha")}, wantEqual: true, reason: "equal because IgnoreField ignores deeply embedded field: Foo1.Alpha", }, { label: "IgnoreFields", x: Bar1{Foo3{&Foo2{&Foo1{Alpha: 5}}}}, y: Bar1{Foo3{&Foo2{&Foo1{Alpha: 6}}}}, opts: []cmp.Option{IgnoreFields(Bar1{}, "Foo2.Alpha")}, wantEqual: true, reason: "equal because IgnoreField ignores deeply embedded field: Foo2.Alpha", }, { label: "IgnoreFields", x: Bar1{Foo3{&Foo2{&Foo1{Alpha: 5}}}}, y: Bar1{Foo3{&Foo2{&Foo1{Alpha: 6}}}}, opts: []cmp.Option{IgnoreFields(Bar1{}, "Foo3.Alpha")}, wantEqual: true, reason: "equal because IgnoreField ignores deeply embedded field: Foo3.Alpha", }, { label: "IgnoreFields", x: Bar1{Foo3{&Foo2{&Foo1{Alpha: 5}}}}, y: Bar1{Foo3{&Foo2{&Foo1{Alpha: 6}}}}, opts: []cmp.Option{IgnoreFields(Bar1{}, "Foo3.Foo2.Alpha")}, wantEqual: true, reason: "equal because IgnoreField ignores deeply embedded field: Foo3.Foo2.Alpha", }, { label: "IgnoreFields", x: createBar3X(), y: createBar3Y(), wantEqual: false, reason: "not equal because many deeply nested or embedded fields differ", }, { label: "IgnoreFields", x: createBar3X(), y: createBar3Y(), opts: []cmp.Option{IgnoreFields(Bar3{}, "Bar1", "Bravo", "Delta", "Foo3", "Alpha")}, wantEqual: true, reason: "equal because IgnoreFields ignores fields at the highest levels", }, { label: "IgnoreFields", x: createBar3X(), y: createBar3Y(), opts: []cmp.Option{ IgnoreFields(Bar3{}, "Bar1.Foo3.Bravo", "Bravo.Bar1.Foo3.Foo2.Foo1.Charlie", "Bravo.Foo3.Foo2.Foo1.Bravo", "Bravo.Bravo", "Delta.Echo.Charlie", "Foo3.Foo2.Foo1.Alpha", "Alpha", ), }, 
wantEqual: true, reason: "equal because IgnoreFields ignores fields using fully-qualified field", }, { label: "IgnoreFields", x: createBar3X(), y: createBar3Y(), opts: []cmp.Option{ IgnoreFields(Bar3{}, "Bar1.Foo3.Bravo", "Bravo.Foo3.Foo2.Foo1.Bravo", "Bravo.Bravo", "Delta.Echo.Charlie", "Foo3.Foo2.Foo1.Alpha", "Alpha", ), }, wantEqual: false, reason: "not equal because one fully-qualified field is not ignored: Bravo.Bar1.Foo3.Foo2.Foo1.Charlie", }, { label: "IgnoreFields", x: createBar3X(), y: createBar3Y(), opts: []cmp.Option{IgnoreFields(Bar3{}, "Bar1", "Bravo", "Delta", "Alpha")}, wantEqual: false, reason: "not equal because highest-level field is not ignored: Foo3", }, { label: "IgnoreFields", x: ParentStruct{ privateStruct: &privateStruct{private: 1}, PublicStruct: &PublicStruct{private: 2}, private: 3, }, y: ParentStruct{ privateStruct: &privateStruct{private: 10}, PublicStruct: &PublicStruct{private: 20}, private: 30, }, opts: []cmp.Option{cmp.AllowUnexported(ParentStruct{}, PublicStruct{}, privateStruct{})}, wantEqual: false, reason: "not equal because unexported fields mismatch", }, { label: "IgnoreFields", x: ParentStruct{ privateStruct: &privateStruct{private: 1}, PublicStruct: &PublicStruct{private: 2}, private: 3, }, y: ParentStruct{ privateStruct: &privateStruct{private: 10}, PublicStruct: &PublicStruct{private: 20}, private: 30, }, opts: []cmp.Option{ cmp.AllowUnexported(ParentStruct{}, PublicStruct{}, privateStruct{}), IgnoreFields(ParentStruct{}, "PublicStruct.private", "privateStruct.private", "private"), }, wantEqual: true, reason: "equal because mismatching unexported fields are ignored", }, { label: "IgnoreTypes", x: []interface{}{5, "same"}, y: []interface{}{6, "same"}, wantEqual: false, reason: "not equal because 5 != 6", }, { label: "IgnoreTypes", x: []interface{}{5, "same"}, y: []interface{}{6, "same"}, opts: []cmp.Option{IgnoreTypes(0)}, wantEqual: true, reason: "equal because ints are ignored", }, { label: "IgnoreTypes+IgnoreInterfaces", x: []interface{}{5, "same", new(bytes.Buffer)}, y: []interface{}{6, "same", new(bytes.Buffer)}, opts: []cmp.Option{IgnoreTypes(0)}, wantPanic: true, reason: "panics because bytes.Buffer has unexported fields", }, { label: "IgnoreTypes+IgnoreInterfaces", x: []interface{}{5, "same", new(bytes.Buffer)}, y: []interface{}{6, "diff", new(bytes.Buffer)}, opts: []cmp.Option{ IgnoreTypes(0, ""), IgnoreInterfaces(struct{ io.Reader }{}), }, wantEqual: true, reason: "equal because bytes.Buffer is ignored by match on interface type", }, { label: "IgnoreTypes+IgnoreInterfaces", x: []interface{}{5, "same", new(bytes.Buffer)}, y: []interface{}{6, "same", new(bytes.Buffer)}, opts: []cmp.Option{ IgnoreTypes(0, ""), IgnoreInterfaces(struct { io.Reader io.Writer fmt.Stringer }{}), }, wantEqual: true, reason: "equal because bytes.Buffer is ignored by match on multiple interface types", }, { label: "IgnoreInterfaces", x: struct{ mu sync.Mutex }{}, y: struct{ mu sync.Mutex }{}, wantPanic: true, reason: "panics because sync.Mutex has unexported fields", }, { label: "IgnoreInterfaces", x: struct{ mu sync.Mutex }{}, y: struct{ mu sync.Mutex }{}, opts: []cmp.Option{IgnoreInterfaces(struct{ sync.Locker }{})}, wantEqual: true, reason: "equal because IgnoreInterfaces applies on values (with pointer receiver)", }, { label: "IgnoreInterfaces", x: struct{ mu *sync.Mutex }{}, y: struct{ mu *sync.Mutex }{}, opts: []cmp.Option{IgnoreInterfaces(struct{ sync.Locker }{})}, wantEqual: true, reason: "equal because IgnoreInterfaces applies on pointers", }, { label: 
"IgnoreUnexported", x: ParentStruct{Public: 1, private: 2}, y: ParentStruct{Public: 1, private: -2}, opts: []cmp.Option{cmp.AllowUnexported(ParentStruct{})}, wantEqual: false, reason: "not equal because ParentStruct.private differs with AllowUnexported", }, { label: "IgnoreUnexported", x: ParentStruct{Public: 1, private: 2}, y: ParentStruct{Public: 1, private: -2}, opts: []cmp.Option{IgnoreUnexported(ParentStruct{})}, wantEqual: true, reason: "equal because IgnoreUnexported ignored ParentStruct.private", }, { label: "IgnoreUnexported", x: ParentStruct{Public: 1, private: 2, PublicStruct: &PublicStruct{Public: 3, private: 4}}, y: ParentStruct{Public: 1, private: -2, PublicStruct: &PublicStruct{Public: 3, private: 4}}, opts: []cmp.Option{ cmp.AllowUnexported(PublicStruct{}), IgnoreUnexported(ParentStruct{}), }, wantEqual: true, reason: "equal because ParentStruct.private is ignored", }, { label: "IgnoreUnexported", x: ParentStruct{Public: 1, private: 2, PublicStruct: &PublicStruct{Public: 3, private: 4}}, y: ParentStruct{Public: 1, private: -2, PublicStruct: &PublicStruct{Public: 3, private: -4}}, opts: []cmp.Option{ cmp.AllowUnexported(PublicStruct{}), IgnoreUnexported(ParentStruct{}), }, wantEqual: false, reason: "not equal because ParentStruct.PublicStruct.private differs and not ignored by IgnoreUnexported(ParentStruct{})", }, { label: "IgnoreUnexported", x: ParentStruct{Public: 1, private: 2, PublicStruct: &PublicStruct{Public: 3, private: 4}}, y: ParentStruct{Public: 1, private: -2, PublicStruct: &PublicStruct{Public: 3, private: -4}}, opts: []cmp.Option{ IgnoreUnexported(ParentStruct{}, PublicStruct{}), }, wantEqual: true, reason: "equal because both ParentStruct.PublicStruct and ParentStruct.PublicStruct.private are ignored", }, { label: "IgnoreUnexported", x: ParentStruct{Public: 1, private: 2, privateStruct: &privateStruct{Public: 3, private: 4}}, y: ParentStruct{Public: 1, private: 2, privateStruct: &privateStruct{Public: -3, private: -4}}, opts: []cmp.Option{ cmp.AllowUnexported(privateStruct{}, PublicStruct{}, ParentStruct{}), }, wantEqual: false, reason: "not equal since ParentStruct.privateStruct differs", }, { label: "IgnoreUnexported", x: ParentStruct{Public: 1, private: 2, privateStruct: &privateStruct{Public: 3, private: 4}}, y: ParentStruct{Public: 1, private: 2, privateStruct: &privateStruct{Public: -3, private: -4}}, opts: []cmp.Option{ cmp.AllowUnexported(privateStruct{}, PublicStruct{}), IgnoreUnexported(ParentStruct{}), }, wantEqual: true, reason: "equal because ParentStruct.privateStruct ignored by IgnoreUnexported(ParentStruct{})", }, { label: "IgnoreUnexported", x: ParentStruct{Public: 1, private: 2, privateStruct: &privateStruct{Public: 3, private: 4}}, y: ParentStruct{Public: 1, private: 2, privateStruct: &privateStruct{Public: 3, private: -4}}, opts: []cmp.Option{ cmp.AllowUnexported(PublicStruct{}, ParentStruct{}), IgnoreUnexported(privateStruct{}), }, wantEqual: true, reason: "equal because privateStruct.private ignored by IgnoreUnexported(privateStruct{})", }, { label: "IgnoreUnexported", x: ParentStruct{Public: 1, private: 2, privateStruct: &privateStruct{Public: 3, private: 4}}, y: ParentStruct{Public: 1, private: 2, privateStruct: &privateStruct{Public: -3, private: -4}}, opts: []cmp.Option{ cmp.AllowUnexported(PublicStruct{}, ParentStruct{}), IgnoreUnexported(privateStruct{}), }, wantEqual: false, reason: "not equal because privateStruct.Public differs and not ignored by IgnoreUnexported(privateStruct{})", }, { label: 
"IgnoreFields+IgnoreTypes+IgnoreUnexported", x: &Everything{ MyInt: 5, MyFloat: 3.3, MyTime: MyTime{time.Now()}, Bar3: *createBar3X(), ParentStruct: ParentStruct{ Public: 1, private: 2, PublicStruct: &PublicStruct{Public: 3, private: 4}, }, }, y: &Everything{ MyInt: -5, MyFloat: 3.3, MyTime: MyTime{time.Now()}, Bar3: *createBar3Y(), ParentStruct: ParentStruct{ Public: 1, private: -2, PublicStruct: &PublicStruct{Public: -3, private: -4}, }, }, opts: []cmp.Option{ IgnoreFields(Everything{}, "MyTime", "Bar3.Foo3"), IgnoreFields(Bar3{}, "Bar1", "Bravo", "Delta", "Alpha"), IgnoreTypes(MyInt(0), PublicStruct{}), IgnoreUnexported(ParentStruct{}), }, wantEqual: true, reason: "equal because all Ignore options can be composed together", }, { label: "IgnoreSliceElements", x: []int{1, 0, 2, 3, 0, 4, 0, 0}, y: []int{0, 0, 0, 0, 1, 2, 3, 4}, opts: []cmp.Option{ IgnoreSliceElements(func(v int) bool { return v == 0 }), }, wantEqual: true, reason: "equal because zero elements are ignored", }, { label: "IgnoreSliceElements", x: []MyInt{1, 0, 2, 3, 0, 4, 0, 0}, y: []MyInt{0, 0, 0, 0, 1, 2, 3, 4}, opts: []cmp.Option{ IgnoreSliceElements(func(v int) bool { return v == 0 }), }, wantEqual: false, reason: "not equal because MyInt is not assignable to int", }, { label: "IgnoreSliceElements", x: MyInts{1, 0, 2, 3, 0, 4, 0, 0}, y: MyInts{0, 0, 0, 0, 1, 2, 3, 4}, opts: []cmp.Option{ IgnoreSliceElements(func(v int) bool { return v == 0 }), }, wantEqual: true, reason: "equal because the element type of MyInts is assignable to int", }, { label: "IgnoreSliceElements+EquateEmpty", x: []MyInt{}, y: []MyInt{0, 0, 0, 0}, opts: []cmp.Option{ IgnoreSliceElements(func(v int) bool { return v == 0 }), EquateEmpty(), }, wantEqual: false, reason: "not equal because ignored elements does not imply empty slice", }, { label: "IgnoreMapEntries", x: map[string]int{"one": 1, "TWO": 2, "three": 3, "FIVE": 5}, y: map[string]int{"one": 1, "three": 3, "TEN": 10}, opts: []cmp.Option{ IgnoreMapEntries(func(k string, v int) bool { return strings.ToUpper(k) == k }), }, wantEqual: true, reason: "equal because uppercase keys are ignored", }, { label: "IgnoreMapEntries", x: map[MyString]int{"one": 1, "TWO": 2, "three": 3, "FIVE": 5}, y: map[MyString]int{"one": 1, "three": 3, "TEN": 10}, opts: []cmp.Option{ IgnoreMapEntries(func(k string, v int) bool { return strings.ToUpper(k) == k }), }, wantEqual: false, reason: "not equal because MyString is not assignable to string", }, { label: "IgnoreMapEntries", x: map[string]MyInt{"one": 1, "TWO": 2, "three": 3, "FIVE": 5}, y: map[string]MyInt{"one": 1, "three": 3, "TEN": 10}, opts: []cmp.Option{ IgnoreMapEntries(func(k string, v int) bool { return strings.ToUpper(k) == k }), }, wantEqual: false, reason: "not equal because MyInt is not assignable to int", }, { label: "IgnoreMapEntries+EquateEmpty", x: map[string]MyInt{"ONE": 1, "TWO": 2, "THREE": 3}, y: nil, opts: []cmp.Option{ IgnoreMapEntries(func(k string, v int) bool { return strings.ToUpper(k) == k }), EquateEmpty(), }, wantEqual: false, reason: "not equal because ignored entries does not imply empty map", }, { label: "AcyclicTransformer", x: "a\nb\nc\nd", y: "a\nb\nd\nd", opts: []cmp.Option{ AcyclicTransformer("", func(s string) []string { return strings.Split(s, "\n") }), }, wantEqual: false, reason: "not equal because 3rd line differs, but should not recurse infinitely", }, { label: "AcyclicTransformer", x: []string{"foo", "Bar", "BAZ"}, y: []string{"Foo", "BAR", "baz"}, opts: []cmp.Option{ AcyclicTransformer("", strings.ToUpper), }, wantEqual: 
true, reason: "equal because of strings.ToUpper; AcyclicTransformer unnecessary, but check this still works", }, { label: "AcyclicTransformer", x: "this is a sentence", y: "this is a sentence", opts: []cmp.Option{ AcyclicTransformer("", strings.Fields), }, wantEqual: true, reason: "equal because acyclic transformer splits on any contiguous whitespace", }} for _, tt := range tests { t.Run(tt.label, func(t *testing.T) { var gotEqual bool var gotPanic string func() { defer func() { if ex := recover(); ex != nil { gotPanic = fmt.Sprint(ex) } }() gotEqual = cmp.Equal(tt.x, tt.y, tt.opts...) }() switch { case tt.reason == "": t.Errorf("reason must be provided") case gotPanic == "" && tt.wantPanic: t.Errorf("expected Equal panic\nreason: %s", tt.reason) case gotPanic != "" && !tt.wantPanic: t.Errorf("unexpected Equal panic: got %v\nreason: %v", gotPanic, tt.reason) case gotEqual != tt.wantEqual: t.Errorf("Equal = %v, want %v\nreason: %v", gotEqual, tt.wantEqual, tt.reason) } }) } } func TestPanic(t *testing.T) { args := func(x ...interface{}) []interface{} { return x } tests := []struct { label string // Test name fnc interface{} // Option function to call args []interface{} // Arguments to pass in wantPanic string // Expected panic message reason string // The reason for the expected outcome }{{ label: "EquateApprox", fnc: EquateApprox, args: args(0.0, 0.0), reason: "zero margin and fraction is equivalent to exact equality", }, { label: "EquateApprox", fnc: EquateApprox, args: args(-0.1, 0.0), wantPanic: "margin or fraction must be a non-negative number", reason: "negative inputs are invalid", }, { label: "EquateApprox", fnc: EquateApprox, args: args(0.0, -0.1), wantPanic: "margin or fraction must be a non-negative number", reason: "negative inputs are invalid", }, { label: "EquateApprox", fnc: EquateApprox, args: args(math.NaN(), 0.0), wantPanic: "margin or fraction must be a non-negative number", reason: "NaN inputs are invalid", }, { label: "EquateApprox", fnc: EquateApprox, args: args(1.0, 0.0), reason: "fraction of 1.0 or greater is valid", }, { label: "EquateApprox", fnc: EquateApprox, args: args(0.0, math.Inf(+1)), reason: "margin of infinity is valid", }, { label: "EquateApproxTime", fnc: EquateApproxTime, args: args(time.Duration(-1)), wantPanic: "margin must be a non-negative number", reason: "negative duration is invalid", }, { label: "SortSlices", fnc: SortSlices, args: args((func(_, _ int) bool)(nil)), wantPanic: "invalid less or compare function", reason: "nil value is not valid", }, { label: "SortMaps", fnc: SortMaps, args: args((func(_, _ int) bool)(nil)), wantPanic: "invalid less or compare function", reason: "nil value is not valid", }, { label: "IgnoreFields", fnc: IgnoreFields, args: args(Foo1{}, ""), wantPanic: "name must not be empty", reason: "empty selector is invalid", }, { label: "IgnoreFields", fnc: IgnoreFields, args: args(Foo1{}, "."), wantPanic: "name must not be empty", reason: "single dot selector is invalid", }, { label: "IgnoreFields", fnc: IgnoreFields, args: args(Foo1{}, ".Alpha"), reason: "dot-prefix is okay since Foo1.Alpha reads naturally", }, { label: "IgnoreFields", fnc: IgnoreFields, args: args(Foo1{}, "Alpha."), wantPanic: "name must not be empty", reason: "dot-suffix is invalid", }, { label: "IgnoreFields", fnc: IgnoreFields, args: args(Foo1{}, "Alpha "), wantPanic: "does not exist", reason: "identifiers must not have spaces", }, { label: "IgnoreFields", fnc: IgnoreFields, args: args(Foo1{}, "Zulu"), wantPanic: "does not exist", reason: "name of 
non-existent field is invalid", }, { label: "IgnoreFields", fnc: IgnoreFields, args: args(Foo1{}, "Alpha.NoExist"), wantPanic: "must be a struct", reason: "cannot select into a non-struct", }, { label: "IgnoreFields", fnc: IgnoreFields, args: args(&Foo1{}, "Alpha"), wantPanic: "must be a non-pointer struct", reason: "the type must be a struct (not pointer to a struct)", }, { label: "IgnoreFields", fnc: IgnoreFields, args: args(struct{ privateStruct }{}, "privateStruct"), reason: "privateStruct field permitted since it is the default name of the embedded type", }, { label: "IgnoreFields", fnc: IgnoreFields, args: args(struct{ privateStruct }{}, "Public"), reason: "Public field permitted since it is a forwarded field that is exported", }, { label: "IgnoreFields", fnc: IgnoreFields, args: args(struct{ privateStruct }{}, "private"), wantPanic: "does not exist", reason: "private field not permitted since it is a forwarded field that is unexported", }, { label: "IgnoreTypes", fnc: IgnoreTypes, reason: "empty input is valid", }, { label: "IgnoreTypes", fnc: IgnoreTypes, args: args(nil), wantPanic: "cannot determine type", reason: "input must not be nil value", }, { label: "IgnoreTypes", fnc: IgnoreTypes, args: args(0, 0, 0), reason: "duplicate inputs of the same type is valid", }, { label: "IgnoreInterfaces", fnc: IgnoreInterfaces, args: args(nil), wantPanic: "input must be an anonymous struct", reason: "input must not be nil value", }, { label: "IgnoreInterfaces", fnc: IgnoreInterfaces, args: args(Foo1{}), wantPanic: "input must be an anonymous struct", reason: "input must not be a named struct type", }, { label: "IgnoreInterfaces", fnc: IgnoreInterfaces, args: args(struct{ _ io.Reader }{}), wantPanic: "struct cannot have named fields", reason: "input must not have named fields", }, { label: "IgnoreInterfaces", fnc: IgnoreInterfaces, args: args(struct{ Foo1 }{}), wantPanic: "embedded field must be an interface type", reason: "field types must be interfaces", }, { label: "IgnoreInterfaces", fnc: IgnoreInterfaces, args: args(struct{ EmptyInterface }{}), wantPanic: "cannot ignore empty interface", reason: "field types must not be the empty interface", }, { label: "IgnoreInterfaces", fnc: IgnoreInterfaces, args: args(struct { io.Reader io.Writer io.Closer io.ReadWriteCloser }{}), reason: "multiple interfaces may be specified, even if they overlap", }, { label: "IgnoreUnexported", fnc: IgnoreUnexported, reason: "empty input is valid", }, { label: "IgnoreUnexported", fnc: IgnoreUnexported, args: args(nil), wantPanic: "must be a non-pointer struct", reason: "input must not be nil value", }, { label: "IgnoreUnexported", fnc: IgnoreUnexported, args: args(&Foo1{}), wantPanic: "must be a non-pointer struct", reason: "input must be a struct type (not a pointer to a struct)", }, { label: "IgnoreUnexported", fnc: IgnoreUnexported, args: args(Foo1{}, struct{ x, X int }{}), reason: "input may be named or unnamed structs", }, { label: "AcyclicTransformer", fnc: AcyclicTransformer, args: args("", "not a func"), wantPanic: "invalid transformer function", reason: "AcyclicTransformer has same input requirements as Transformer", }} for _, tt := range tests { t.Run(tt.label, func(t *testing.T) { // Prepare function arguments. 
vf := reflect.ValueOf(tt.fnc) var vargs []reflect.Value for i, arg := range tt.args { if arg == nil { tf := vf.Type() if i == tf.NumIn()-1 && tf.IsVariadic() { vargs = append(vargs, reflect.Zero(tf.In(i).Elem())) } else { vargs = append(vargs, reflect.Zero(tf.In(i))) } } else { vargs = append(vargs, reflect.ValueOf(arg)) } } // Call the function and capture any panics. var gotPanic string func() { defer func() { if ex := recover(); ex != nil { if s, ok := ex.(string); ok { gotPanic = s } else { panic(ex) } } }() vf.Call(vargs) }() switch { case tt.reason == "": t.Errorf("reason must be provided") case tt.wantPanic == "" && gotPanic != "": t.Errorf("unexpected panic message: %s\nreason: %s", gotPanic, tt.reason) case tt.wantPanic != "" && !strings.Contains(gotPanic, tt.wantPanic): t.Errorf("panic message:\ngot: %s\nwant: %s\nreason: %s", gotPanic, tt.wantPanic, tt.reason) } }) } } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/cmpopts/ignore.go0000644000000000000000000001454415024302467025201 0ustar rootroot// Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package cmpopts import ( "fmt" "reflect" "unicode" "unicode/utf8" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/internal/function" ) // IgnoreFields returns an [cmp.Option] that ignores fields of the // given names on a single struct type. It respects the names of exported fields // that are forwarded due to struct embedding. // The struct type is specified by passing in a value of that type. // // The name may be a dot-delimited string (e.g., "Foo.Bar") to ignore a // specific sub-field that is embedded or nested within the parent struct. func IgnoreFields(typ interface{}, names ...string) cmp.Option { sf := newStructFilter(typ, names...) return cmp.FilterPath(sf.filter, cmp.Ignore()) } // IgnoreTypes returns an [cmp.Option] that ignores all values assignable to // certain types, which are specified by passing in a value of each type. func IgnoreTypes(typs ...interface{}) cmp.Option { tf := newTypeFilter(typs...) return cmp.FilterPath(tf.filter, cmp.Ignore()) } type typeFilter []reflect.Type func newTypeFilter(typs ...interface{}) (tf typeFilter) { for _, typ := range typs { t := reflect.TypeOf(typ) if t == nil { // This occurs if someone tries to pass in sync.Locker(nil) panic("cannot determine type; consider using IgnoreInterfaces") } tf = append(tf, t) } return tf } func (tf typeFilter) filter(p cmp.Path) bool { if len(p) < 1 { return false } t := p.Last().Type() for _, ti := range tf { if t.AssignableTo(ti) { return true } } return false } // IgnoreInterfaces returns an [cmp.Option] that ignores all values or references of // values assignable to certain interface types. These interfaces are specified // by passing in an anonymous struct with the interface types embedded in it. // For example, to ignore [sync.Locker], pass in struct{sync.Locker}{}. 
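//
// As a minimal sketch (the type and field names below are hypothetical, and
// the pattern mirrors this package's own tests rather than the original
// documentation):
//
//	type server struct {
//		mu   sync.Mutex // unexported; would normally make cmp.Equal panic
//		Addr string
//	}
//	equal := cmp.Equal(server{Addr: "localhost"}, server{Addr: "localhost"},
//		IgnoreInterfaces(struct{ sync.Locker }{}))
//	// equal == true: the mu field is ignored because *sync.Mutex implements sync.Locker.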
func IgnoreInterfaces(ifaces interface{}) cmp.Option { tf := newIfaceFilter(ifaces) return cmp.FilterPath(tf.filter, cmp.Ignore()) } type ifaceFilter []reflect.Type func newIfaceFilter(ifaces interface{}) (tf ifaceFilter) { t := reflect.TypeOf(ifaces) if ifaces == nil || t.Name() != "" || t.Kind() != reflect.Struct { panic("input must be an anonymous struct") } for i := 0; i < t.NumField(); i++ { fi := t.Field(i) switch { case !fi.Anonymous: panic("struct cannot have named fields") case fi.Type.Kind() != reflect.Interface: panic("embedded field must be an interface type") case fi.Type.NumMethod() == 0: // This matches everything; why would you ever want this? panic("cannot ignore empty interface") default: tf = append(tf, fi.Type) } } return tf } func (tf ifaceFilter) filter(p cmp.Path) bool { if len(p) < 1 { return false } t := p.Last().Type() for _, ti := range tf { if t.AssignableTo(ti) { return true } if t.Kind() != reflect.Ptr && reflect.PtrTo(t).AssignableTo(ti) { return true } } return false } // IgnoreUnexported returns an [cmp.Option] that only ignores the immediate unexported // fields of a struct, including anonymous fields of unexported types. // In particular, unexported fields within the struct's exported fields // of struct types, including anonymous fields, will not be ignored unless the // type of the field itself is also passed to IgnoreUnexported. // // Avoid ignoring unexported fields of a type which you do not control (i.e. a // type from another repository), as changes to the implementation of such types // may change how the comparison behaves. Prefer a custom [cmp.Comparer] instead. func IgnoreUnexported(typs ...interface{}) cmp.Option { ux := newUnexportedFilter(typs...) return cmp.FilterPath(ux.filter, cmp.Ignore()) } type unexportedFilter struct{ m map[reflect.Type]bool } func newUnexportedFilter(typs ...interface{}) unexportedFilter { ux := unexportedFilter{m: make(map[reflect.Type]bool)} for _, typ := range typs { t := reflect.TypeOf(typ) if t == nil || t.Kind() != reflect.Struct { panic(fmt.Sprintf("%T must be a non-pointer struct", typ)) } ux.m[t] = true } return ux } func (xf unexportedFilter) filter(p cmp.Path) bool { sf, ok := p.Index(-1).(cmp.StructField) if !ok { return false } return xf.m[p.Index(-2).Type()] && !isExported(sf.Name()) } // isExported reports whether the identifier is exported. func isExported(id string) bool { r, _ := utf8.DecodeRuneInString(id) return unicode.IsUpper(r) } // IgnoreSliceElements returns an [cmp.Option] that ignores elements of []V. // The discard function must be of the form "func(T) bool" which is used to // ignore slice elements of type V, where V is assignable to T. // Elements are ignored if the function reports true. func IgnoreSliceElements(discardFunc interface{}) cmp.Option { vf := reflect.ValueOf(discardFunc) if !function.IsType(vf.Type(), function.ValuePredicate) || vf.IsNil() { panic(fmt.Sprintf("invalid discard function: %T", discardFunc)) } return cmp.FilterPath(func(p cmp.Path) bool { si, ok := p.Index(-1).(cmp.SliceIndex) if !ok { return false } if !si.Type().AssignableTo(vf.Type().In(0)) { return false } vx, vy := si.Values() if vx.IsValid() && vf.Call([]reflect.Value{vx})[0].Bool() { return true } if vy.IsValid() && vf.Call([]reflect.Value{vy})[0].Bool() { return true } return false }, cmp.Ignore()) } // IgnoreMapEntries returns an [cmp.Option] that ignores entries of map[K]V. 
// The discard function must be of the form "func(T, R) bool" which is used to // ignore map entries of type K and V, where K and V are assignable to T and R. // Entries are ignored if the function reports true. func IgnoreMapEntries(discardFunc interface{}) cmp.Option { vf := reflect.ValueOf(discardFunc) if !function.IsType(vf.Type(), function.KeyValuePredicate) || vf.IsNil() { panic(fmt.Sprintf("invalid discard function: %T", discardFunc)) } return cmp.FilterPath(func(p cmp.Path) bool { mi, ok := p.Index(-1).(cmp.MapIndex) if !ok { return false } if !mi.Key().Type().AssignableTo(vf.Type().In(0)) || !mi.Type().AssignableTo(vf.Type().In(1)) { return false } k := mi.Key() vx, vy := mi.Values() if vx.IsValid() && vf.Call([]reflect.Value{k, vx})[0].Bool() { return true } if vy.IsValid() && vf.Call([]reflect.Value{k, vy})[0].Bool() { return true } return false }, cmp.Ignore()) } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/cmpopts/struct_filter.go0000644000000000000000000001222615024302467026602 0ustar rootroot// Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package cmpopts import ( "fmt" "reflect" "strings" "github.com/google/go-cmp/cmp" ) // filterField returns a new Option where opt is only evaluated on paths that // include a specific exported field on a single struct type. // The struct type is specified by passing in a value of that type. // // The name may be a dot-delimited string (e.g., "Foo.Bar") to select a // specific sub-field that is embedded or nested within the parent struct. func filterField(typ interface{}, name string, opt cmp.Option) cmp.Option { // TODO: This is currently unexported over concerns of how helper filters // can be composed together easily. // TODO: Add tests for FilterField. sf := newStructFilter(typ, name) return cmp.FilterPath(sf.filter, opt) } type structFilter struct { t reflect.Type // The root struct type to match on ft fieldTree // Tree of fields to match on } func newStructFilter(typ interface{}, names ...string) structFilter { // TODO: Perhaps allow * as a special identifier to allow ignoring any // number of path steps until the next field match? // This could be useful when a concrete struct gets transformed into // an anonymous struct where it is not possible to specify that by type, // but the transformer happens to provide guarantees about the names of // the transformed fields. t := reflect.TypeOf(typ) if t == nil || t.Kind() != reflect.Struct { panic(fmt.Sprintf("%T must be a non-pointer struct", typ)) } var ft fieldTree for _, name := range names { cname, err := canonicalName(t, name) if err != nil { panic(fmt.Sprintf("%s: %v", strings.Join(cname, "."), err)) } ft.insert(cname) } return structFilter{t, ft} } func (sf structFilter) filter(p cmp.Path) bool { for i, ps := range p { if ps.Type().AssignableTo(sf.t) && sf.ft.matchPrefix(p[i+1:]) { return true } } return false } // fieldTree represents a set of dot-separated identifiers. 
// // For example, inserting the following selectors: // // Foo // Foo.Bar.Baz // Foo.Buzz // Nuka.Cola.Quantum // // Results in a tree of the form: // // {sub: { // "Foo": {ok: true, sub: { // "Bar": {sub: { // "Baz": {ok: true}, // }}, // "Buzz": {ok: true}, // }}, // "Nuka": {sub: { // "Cola": {sub: { // "Quantum": {ok: true}, // }}, // }}, // }} type fieldTree struct { ok bool // Whether this is a specified node sub map[string]fieldTree // The sub-tree of fields under this node } // insert inserts a sequence of field accesses into the tree. func (ft *fieldTree) insert(cname []string) { if ft.sub == nil { ft.sub = make(map[string]fieldTree) } if len(cname) == 0 { ft.ok = true return } sub := ft.sub[cname[0]] sub.insert(cname[1:]) ft.sub[cname[0]] = sub } // matchPrefix reports whether any selector in the fieldTree matches // the start of path p. func (ft fieldTree) matchPrefix(p cmp.Path) bool { for _, ps := range p { switch ps := ps.(type) { case cmp.StructField: ft = ft.sub[ps.Name()] if ft.ok { return true } if len(ft.sub) == 0 { return false } case cmp.Indirect: default: return false } } return false } // canonicalName returns a list of identifiers where any struct field access // through an embedded field is expanded to include the names of the embedded // types themselves. // // For example, suppose field "Foo" is not directly in the parent struct, // but actually from an embedded struct of type "Bar". Then, the canonical name // of "Foo" is actually "Bar.Foo". // // Suppose field "Foo" is not directly in the parent struct, but actually // a field in two different embedded structs of types "Bar" and "Baz". // Then the selector "Foo" causes a panic since it is ambiguous which one it // refers to. The user must specify either "Bar.Foo" or "Baz.Foo". func canonicalName(t reflect.Type, sel string) ([]string, error) { var name string sel = strings.TrimPrefix(sel, ".") if sel == "" { return nil, fmt.Errorf("name must not be empty") } if i := strings.IndexByte(sel, '.'); i < 0 { name, sel = sel, "" } else { name, sel = sel[:i], sel[i:] } // Type must be a struct or pointer to struct. if t.Kind() == reflect.Ptr { t = t.Elem() } if t.Kind() != reflect.Struct { return nil, fmt.Errorf("%v must be a struct", t) } // Find the canonical name for this current field name. // If the field exists in an embedded struct, then it will be expanded. sf, _ := t.FieldByName(name) if !isExported(name) { // Avoid using reflect.Type.FieldByName for unexported fields due to // buggy behavior with regard to embeddeding and unexported fields. // See https://golang.org/issue/4876 for details. sf = reflect.StructField{} for i := 0; i < t.NumField() && sf.Name == ""; i++ { if t.Field(i).Name == name { sf = t.Field(i) } } } if sf.Name == "" { return []string{name}, fmt.Errorf("does not exist") } var ss []string for i := range sf.Index { ss = append(ss, t.FieldByIndex(sf.Index[:i+1]).Name) } if sel == "" { return ss, nil } ssPost, err := canonicalName(sf.Type, sel) return append(ss, ssPost...), err } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/cmpopts/example_test.go0000644000000000000000000000722615024302467026407 0ustar rootroot// Copyright 2020, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
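// Illustrative sketch (added for this document, not part of the upstream
// go-cmp sources): the dotted selectors resolved by canonicalName and
// fieldTree above are what the exported cmpopts.IgnoreFields option accepts.
// A field promoted from an embedded struct may be named either directly or by
// its canonical "Embedded.Field" path. The Meta and Record types here are
// hypothetical.
//
//	type Meta struct{ ID int }
//	type Record struct {
//		Meta
//		Name string
//	}
//
//	x := Record{Meta: Meta{ID: 1}, Name: "a"}
//	y := Record{Meta: Meta{ID: 2}, Name: "a"}
//	cmp.Equal(x, y)                                            // false: Meta.ID differs
//	cmp.Equal(x, y, cmpopts.IgnoreFields(Record{}, "Meta.ID")) // true
//	cmp.Equal(x, y, cmpopts.IgnoreFields(Record{}, "ID"))      // true: "ID" expands to "Meta.ID"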
package cmpopts_test import ( "fmt" "net" "time" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/google/go-cmp/cmp/internal/flags" ) func init() { flags.Deterministic = true } // Use IgnoreFields to ignore fields on a struct type when comparing // by providing a value of the type and the field names to ignore. // Typically, a zero value of the type is used (e.g., foo.MyStruct{}). func ExampleIgnoreFields_testing() { // Let got be the hypothetical value obtained from some logic under test // and want be the expected golden data. got, want := MakeGatewayInfo() // While the specified fields will be semantically ignored for the comparison, // the fields may be printed in the diff when displaying entire values // that are already determined to be different. if diff := cmp.Diff(want, got, cmpopts.IgnoreFields(Client{}, "IPAddress")); diff != "" { t.Errorf("MakeGatewayInfo() mismatch (-want +got):\n%s", diff) } // Output: // MakeGatewayInfo() mismatch (-want +got): // cmpopts_test.Gateway{ // SSID: "CoffeeShopWiFi", // - IPAddress: s"192.168.0.2", // + IPAddress: s"192.168.0.1", // NetMask: s"ffff0000", // Clients: []cmpopts_test.Client{ // ... // 3 identical elements // {Hostname: "espresso", ...}, // {Hostname: "latte", LastSeen: s"2009-11-10 23:00:23 +0000 UTC", ...}, // + { // + Hostname: "americano", // + IPAddress: s"192.168.0.188", // + LastSeen: s"2009-11-10 23:03:05 +0000 UTC", // + }, // }, // } } type ( Gateway struct { SSID string IPAddress net.IP NetMask net.IPMask Clients []Client } Client struct { Hostname string IPAddress net.IP LastSeen time.Time } ) func MakeGatewayInfo() (x, y Gateway) { x = Gateway{ SSID: "CoffeeShopWiFi", IPAddress: net.IPv4(192, 168, 0, 1), NetMask: net.IPv4Mask(255, 255, 0, 0), Clients: []Client{{ Hostname: "ristretto", IPAddress: net.IPv4(192, 168, 0, 116), }, { Hostname: "arabica", IPAddress: net.IPv4(192, 168, 0, 104), LastSeen: time.Date(2009, time.November, 10, 23, 6, 32, 0, time.UTC), }, { Hostname: "macchiato", IPAddress: net.IPv4(192, 168, 0, 153), LastSeen: time.Date(2009, time.November, 10, 23, 39, 43, 0, time.UTC), }, { Hostname: "espresso", IPAddress: net.IPv4(192, 168, 0, 121), }, { Hostname: "latte", IPAddress: net.IPv4(192, 168, 0, 219), LastSeen: time.Date(2009, time.November, 10, 23, 0, 23, 0, time.UTC), }, { Hostname: "americano", IPAddress: net.IPv4(192, 168, 0, 188), LastSeen: time.Date(2009, time.November, 10, 23, 3, 5, 0, time.UTC), }}, } y = Gateway{ SSID: "CoffeeShopWiFi", IPAddress: net.IPv4(192, 168, 0, 2), NetMask: net.IPv4Mask(255, 255, 0, 0), Clients: []Client{{ Hostname: "ristretto", IPAddress: net.IPv4(192, 168, 0, 116), }, { Hostname: "arabica", IPAddress: net.IPv4(192, 168, 0, 104), LastSeen: time.Date(2009, time.November, 10, 23, 6, 32, 0, time.UTC), }, { Hostname: "macchiato", IPAddress: net.IPv4(192, 168, 0, 153), LastSeen: time.Date(2009, time.November, 10, 23, 39, 43, 0, time.UTC), }, { Hostname: "espresso", IPAddress: net.IPv4(192, 168, 0, 121), }, { Hostname: "latte", IPAddress: net.IPv4(192, 168, 0, 221), LastSeen: time.Date(2009, time.November, 10, 23, 0, 23, 0, time.UTC), }}, } return x, y } var t fakeT type fakeT struct{} func (t fakeT) Errorf(format string, args ...interface{}) { fmt.Printf(format+"\n", args...) } dependencies/pkg/mod/github.com/google/go-cmp@v0.7.0/cmp/cmpopts/equate.go0000644000000000000000000001432415024302467025176 0ustar rootroot// Copyright 2017, The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package cmpopts provides common options for the cmp package. package cmpopts import ( "errors" "fmt" "math" "reflect" "time" "github.com/google/go-cmp/cmp" ) func equateAlways(_, _ interface{}) bool { return true } // EquateEmpty returns a [cmp.Comparer] option that determines all maps and slices // with a length of zero to be equal, regardless of whether they are nil. // // EquateEmpty can be used in conjunction with [SortSlices] and [SortMaps]. func EquateEmpty() cmp.Option { return cmp.FilterValues(isEmpty, cmp.Comparer(equateAlways)) } func isEmpty(x, y interface{}) bool { vx, vy := reflect.ValueOf(x), reflect.ValueOf(y) return (x != nil && y != nil && vx.Type() == vy.Type()) && (vx.Kind() == reflect.Slice || vx.Kind() == reflect.Map) && (vx.Len() == 0 && vy.Len() == 0) } // EquateApprox returns a [cmp.Comparer] option that determines float32 or float64 // values to be equal if they are within a relative fraction or absolute margin. // This option is not used when either x or y is NaN or infinite. // // The fraction determines that the difference of two values must be within the // smaller fraction of the two values, while the margin determines that the two // values must be within some absolute margin. // To express only a fraction or only a margin, use 0 for the other parameter. // The fraction and margin must be non-negative. // // The mathematical expression used is equivalent to: // // |x-y| ≤ max(fraction*min(|x|, |y|), margin) // // EquateApprox can be used in conjunction with [EquateNaNs]. func EquateApprox(fraction, margin float64) cmp.Option { if margin < 0 || fraction < 0 || math.IsNaN(margin) || math.IsNaN(fraction) { panic("margin or fraction must be a non-negative number") } a := approximator{fraction, margin} return cmp.Options{ cmp.FilterValues(areRealF64s, cmp.Comparer(a.compareF64)), cmp.FilterValues(areRealF32s, cmp.Comparer(a.compareF32)), } } type approximator struct{ frac, marg float64 } func areRealF64s(x, y float64) bool { return !math.IsNaN(x) && !math.IsNaN(y) && !math.IsInf(x, 0) && !math.IsInf(y, 0) } func areRealF32s(x, y float32) bool { return areRealF64s(float64(x), float64(y)) } func (a approximator) compareF64(x, y float64) bool { relMarg := a.frac * math.Min(math.Abs(x), math.Abs(y)) return math.Abs(x-y) <= math.Max(a.marg, relMarg) } func (a approximator) compareF32(x, y float32) bool { return a.compareF64(float64(x), float64(y)) } // EquateNaNs returns a [cmp.Comparer] option that determines float32 and float64 // NaN values to be equal. // // EquateNaNs can be used in conjunction with [EquateApprox]. func EquateNaNs() cmp.Option { return cmp.Options{ cmp.FilterValues(areNaNsF64s, cmp.Comparer(equateAlways)), cmp.FilterValues(areNaNsF32s, cmp.Comparer(equateAlways)), } } func areNaNsF64s(x, y float64) bool { return math.IsNaN(x) && math.IsNaN(y) } func areNaNsF32s(x, y float32) bool { return areNaNsF64s(float64(x), float64(y)) } // EquateApproxTime returns a [cmp.Comparer] option that determines two non-zero // [time.Time] values to be equal if they are within some margin of one another. // If both times have a monotonic clock reading, then the monotonic time // difference will be used. The margin must be non-negative. 
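// An illustrative sketch (added for this document, not in the upstream
// source): EquateApproxTime in use, assuming the cmp and cmpopts packages are
// imported.
//
//	t0 := time.Now()
//	t1 := t0.Add(3 * time.Millisecond)
//	cmp.Equal(t0, t1, cmpopts.EquateApproxTime(5*time.Millisecond)) // true: within margin
//	cmp.Equal(t0, t1, cmpopts.EquateApproxTime(1*time.Millisecond)) // false: outside margin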
func EquateApproxTime(margin time.Duration) cmp.Option { if margin < 0 { panic("margin must be a non-negative number") } a := timeApproximator{margin} return cmp.FilterValues(areNonZeroTimes, cmp.Comparer(a.compare)) } func areNonZeroTimes(x, y time.Time) bool { return !x.IsZero() && !y.IsZero() } type timeApproximator struct { margin time.Duration } func (a timeApproximator) compare(x, y time.Time) bool { // Avoid subtracting times to avoid overflow when the // difference is larger than the largest representable duration. if x.After(y) { // Ensure x is always before y x, y = y, x } // We're within the margin if x+margin >= y. // Note: time.Time doesn't have AfterOrEqual method hence the negation. return !x.Add(a.margin).Before(y) } // AnyError is an error that matches any non-nil error. var AnyError anyError type anyError struct{} func (anyError) Error() string { return "any error" } func (anyError) Is(err error) bool { return err != nil } // EquateErrors returns a [cmp.Comparer] option that determines errors to be equal // if [errors.Is] reports them to match. The [AnyError] error can be used to // match any non-nil error. func EquateErrors() cmp.Option { return cmp.FilterValues(areConcreteErrors, cmp.Comparer(compareErrors)) } // areConcreteErrors reports whether x and y are types that implement error. // The input types are deliberately of the interface{} type rather than the // error type so that we can handle situations where the current type is an // interface{}, but the underlying concrete types both happen to implement // the error interface. func areConcreteErrors(x, y interface{}) bool { _, ok1 := x.(error) _, ok2 := y.(error) return ok1 && ok2 } func compareErrors(x, y interface{}) bool { xe := x.(error) ye := y.(error) return errors.Is(xe, ye) || errors.Is(ye, xe) } // EquateComparable returns a [cmp.Option] that determines equality // of comparable types by directly comparing them using the == operator in Go. // The types to compare are specified by passing a value of that type. // This option should only be used on types that are documented as being // safe for direct == comparison. For example, [net/netip.Addr] is documented // as being semantically safe to use with ==, while [time.Time] is documented // to discourage the use of == on time values. func EquateComparable(typs ...interface{}) cmp.Option { types := make(typesFilter) for _, typ := range typs { switch t := reflect.TypeOf(typ); { case !t.Comparable(): panic(fmt.Sprintf("%T is not a comparable Go type", typ)) case types[t]: panic(fmt.Sprintf("%T is already specified", typ)) default: types[t] = true } } return cmp.FilterPath(types.filter, cmp.Comparer(equateAny)) } type typesFilter map[reflect.Type]bool func (tf typesFilter) filter(p cmp.Path) bool { return tf[p.Last().Type()] } func equateAny(x, y interface{}) bool { return x == y } dependencies/pkg/mod/github.com/google/uuid@v1.6.0/0000755000000000000000000000000015024302467020677 5ustar rootrootdependencies/pkg/mod/github.com/google/uuid@v1.6.0/version1.go0000644000000000000000000000235115024302467022775 0ustar rootroot// Copyright 2016 Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package uuid import ( "encoding/binary" ) // NewUUID returns a Version 1 UUID based on the current NodeID and clock // sequence, and the current time. If the NodeID has not been set by SetNodeID // or SetNodeInterface then it will be set automatically. 
If the NodeID cannot // be set NewUUID returns nil. If clock sequence has not been set by // SetClockSequence then it will be set automatically. If GetTime fails to // return the current NewUUID returns nil and an error. // // In most cases, New should be used. func NewUUID() (UUID, error) { var uuid UUID now, seq, err := GetTime() if err != nil { return uuid, err } timeLow := uint32(now & 0xffffffff) timeMid := uint16((now >> 32) & 0xffff) timeHi := uint16((now >> 48) & 0x0fff) timeHi |= 0x1000 // Version 1 binary.BigEndian.PutUint32(uuid[0:], timeLow) binary.BigEndian.PutUint16(uuid[4:], timeMid) binary.BigEndian.PutUint16(uuid[6:], timeHi) binary.BigEndian.PutUint16(uuid[8:], seq) nodeMu.Lock() if nodeID == zeroID { setNodeInterface("") } copy(uuid[10:], nodeID[:]) nodeMu.Unlock() return uuid, nil } dependencies/pkg/mod/github.com/google/uuid@v1.6.0/json_test.go0000644000000000000000000000466515024302467023251 0ustar rootroot// Copyright 2016 Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package uuid import ( "encoding/json" "reflect" "testing" ) var testUUID = Must(Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479")) func TestJSON(t *testing.T) { type S struct { ID1 UUID ID2 UUID } s1 := S{ID1: testUUID} data, err := json.Marshal(&s1) if err != nil { t.Fatal(err) } var s2 S if err := json.Unmarshal(data, &s2); err != nil { t.Fatal(err) } if !reflect.DeepEqual(&s1, &s2) { t.Errorf("got %#v, want %#v", s2, s1) } } func TestJSONUnmarshal(t *testing.T) { type S struct { ID1 UUID ID2 UUID `json:"ID2,omitempty"` } testCases := map[string]struct { data []byte expectedError error expectedResult UUID }{ "success": { data: []byte(`{"ID1": "f47ac10b-58cc-0372-8567-0e02b2c3d479"}`), expectedError: nil, expectedResult: testUUID, }, "zero": { data: []byte(`{"ID1": "00000000-0000-0000-0000-000000000000"}`), expectedError: nil, expectedResult: Nil, }, "null": { data: []byte(`{"ID1": null}`), expectedError: nil, expectedResult: Nil, }, "empty": { data: []byte(`{"ID1": ""}`), expectedError: invalidLengthError{len: 0}, expectedResult: Nil, }, "omitempty": { data: []byte(`{"ID2": ""}`), expectedError: invalidLengthError{len: 0}, expectedResult: Nil, }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { var s S if err := json.Unmarshal(tc.data, &s); err != tc.expectedError { t.Errorf("unexpected error: got %v, want %v", err, tc.expectedError) } if !reflect.DeepEqual(s.ID1, tc.expectedResult) { t.Errorf("got %#v, want %#v", s.ID1, tc.expectedResult) } }) } } func BenchmarkUUID_MarshalJSON(b *testing.B) { x := &struct { UUID UUID `json:"uuid"` }{} var err error x.UUID, err = Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479") if err != nil { b.Fatal(err) } for i := 0; i < b.N; i++ { js, err := json.Marshal(x) if err != nil { b.Fatalf("marshal json: %#v (%v)", js, err) } } } func BenchmarkUUID_UnmarshalJSON(b *testing.B) { js := []byte(`{"uuid":"f47ac10b-58cc-0372-8567-0e02b2c3d479"}`) var x *struct { UUID UUID `json:"uuid"` } for i := 0; i < b.N; i++ { err := json.Unmarshal(js, &x) if err != nil { b.Fatalf("marshal json: %#v (%v)", js, err) } } } dependencies/pkg/mod/github.com/google/uuid@v1.6.0/node_js.go0000644000000000000000000000076215024302467022654 0ustar rootroot// Copyright 2017 Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
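// Illustrative sketch (added for this document, not part of the upstream uuid
// sources): the JSON behaviour exercised by json_test.go above. UUID
// implements encoding.TextMarshaler and encoding.TextUnmarshaler, so
// encoding/json round-trips it as its canonical 36-character string. The
// Event struct is hypothetical.
//
//	type Event struct {
//		ID uuid.UUID `json:"id"`
//	}
//
//	e := Event{ID: uuid.New()}
//	b, _ := json.Marshal(e) // e.g. {"id":"f47ac10b-58cc-4372-8567-0e02b2c3d479"}
//	var out Event
//	_ = json.Unmarshal(b, &out) // out.ID == e.ID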
// +build js package uuid // getHardwareInterface returns nil values for the JS version of the code. // This removes the "net" dependency, because it is not used in the browser. // Using the "net" library inflates the size of the transpiled JS code by 673k bytes. func getHardwareInterface(name string) (string, []byte) { return "", nil } dependencies/pkg/mod/github.com/google/uuid@v1.6.0/LICENSE0000644000000000000000000000271015024302467021704 0ustar rootrootCopyright (c) 2009,2014 Google Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. dependencies/pkg/mod/github.com/google/uuid@v1.6.0/dce.go0000644000000000000000000000403015024302467021756 0ustar rootroot// Copyright 2016 Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package uuid import ( "encoding/binary" "fmt" "os" ) // A Domain represents a Version 2 domain type Domain byte // Domain constants for DCE Security (Version 2) UUIDs. const ( Person = Domain(0) Group = Domain(1) Org = Domain(2) ) // NewDCESecurity returns a DCE Security (Version 2) UUID. // // The domain should be one of Person, Group or Org. // On a POSIX system the id should be the users UID for the Person // domain and the users GID for the Group. The meaning of id for // the domain Org or on non-POSIX systems is site defined. // // For a given domain/id pair the same token may be returned for up to // 7 minutes and 10 seconds. func NewDCESecurity(domain Domain, id uint32) (UUID, error) { uuid, err := NewUUID() if err == nil { uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 uuid[9] = byte(domain) binary.BigEndian.PutUint32(uuid[0:], id) } return uuid, err } // NewDCEPerson returns a DCE Security (Version 2) UUID in the person // domain with the id returned by os.Getuid. // // NewDCESecurity(Person, uint32(os.Getuid())) func NewDCEPerson() (UUID, error) { return NewDCESecurity(Person, uint32(os.Getuid())) } // NewDCEGroup returns a DCE Security (Version 2) UUID in the group // domain with the id returned by os.Getgid. 
// // NewDCESecurity(Group, uint32(os.Getgid())) func NewDCEGroup() (UUID, error) { return NewDCESecurity(Group, uint32(os.Getgid())) } // Domain returns the domain for a Version 2 UUID. Domains are only defined // for Version 2 UUIDs. func (uuid UUID) Domain() Domain { return Domain(uuid[9]) } // ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 // UUIDs. func (uuid UUID) ID() uint32 { return binary.BigEndian.Uint32(uuid[0:4]) } func (d Domain) String() string { switch d { case Person: return "Person" case Group: return "Group" case Org: return "Org" } return fmt.Sprintf("Domain%d", int(d)) } dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version6.go0000644000000000000000000000424515024302467023006 0ustar rootroot// Copyright 2023 Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package uuid import "encoding/binary" // UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality. // It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs. // Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead. // // see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6 // // NewV6 returns a Version 6 UUID based on the current NodeID and clock // sequence, and the current time. If the NodeID has not been set by SetNodeID // or SetNodeInterface then it will be set automatically. If the NodeID cannot // be set NewV6 set NodeID is random bits automatically . If clock sequence has not been set by // SetClockSequence then it will be set automatically. If GetTime fails to // return the current NewV6 returns Nil and an error. func NewV6() (UUID, error) { var uuid UUID now, seq, err := GetTime() if err != nil { return uuid, err } /* 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | time_high | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | time_mid | time_low_and_version | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |clk_seq_hi_res | clk_seq_low | node (0-1) | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | node (2-5) | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ binary.BigEndian.PutUint64(uuid[0:], uint64(now)) binary.BigEndian.PutUint16(uuid[8:], seq) uuid[6] = 0x60 | (uuid[6] & 0x0F) uuid[8] = 0x80 | (uuid[8] & 0x3F) nodeMu.Lock() if nodeID == zeroID { setNodeInterface("") } copy(uuid[10:], nodeID[:]) nodeMu.Unlock() return uuid, nil } dependencies/pkg/mod/github.com/google/uuid@v1.6.0/uuid_test.go0000644000000000000000000005536715024302467023253 0ustar rootroot// Copyright 2016 Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
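// Illustrative sketch (added for this document, not part of the upstream
// files): minimal use of the time-based constructors documented in
// version1.go and version6.go above. Both set the node ID and clock sequence
// automatically on first use, so only the error needs handling.
//
//	id, err := uuid.NewV6()
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(id.Version(), id) // the version and the canonical string form
//	sec, nsec := id.Time().UnixTime()
//	fmt.Println(time.Unix(sec, nsec).UTC()) // timestamp recovered from the UUID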
package uuid import ( "bytes" "errors" "fmt" "os" "runtime" "strings" "testing" "time" "unsafe" ) type test struct { in string version Version variant Variant isuuid bool } var tests = []test{ {"f47ac10b-58cc-0372-8567-0e02b2c3d479", 0, RFC4122, true}, {"f47ac10b-58cc-1372-8567-0e02b2c3d479", 1, RFC4122, true}, {"f47ac10b-58cc-2372-8567-0e02b2c3d479", 2, RFC4122, true}, {"f47ac10b-58cc-3372-8567-0e02b2c3d479", 3, RFC4122, true}, {"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true}, {"f47ac10b-58cc-5372-8567-0e02b2c3d479", 5, RFC4122, true}, {"f47ac10b-58cc-6372-8567-0e02b2c3d479", 6, RFC4122, true}, {"f47ac10b-58cc-7372-8567-0e02b2c3d479", 7, RFC4122, true}, {"f47ac10b-58cc-8372-8567-0e02b2c3d479", 8, RFC4122, true}, {"f47ac10b-58cc-9372-8567-0e02b2c3d479", 9, RFC4122, true}, {"f47ac10b-58cc-a372-8567-0e02b2c3d479", 10, RFC4122, true}, {"f47ac10b-58cc-b372-8567-0e02b2c3d479", 11, RFC4122, true}, {"f47ac10b-58cc-c372-8567-0e02b2c3d479", 12, RFC4122, true}, {"f47ac10b-58cc-d372-8567-0e02b2c3d479", 13, RFC4122, true}, {"f47ac10b-58cc-e372-8567-0e02b2c3d479", 14, RFC4122, true}, {"f47ac10b-58cc-f372-8567-0e02b2c3d479", 15, RFC4122, true}, {"urn:uuid:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true}, {"URN:UUID:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true}, {"f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true}, {"f47ac10b-58cc-4372-1567-0e02b2c3d479", 4, Reserved, true}, {"f47ac10b-58cc-4372-2567-0e02b2c3d479", 4, Reserved, true}, {"f47ac10b-58cc-4372-3567-0e02b2c3d479", 4, Reserved, true}, {"f47ac10b-58cc-4372-4567-0e02b2c3d479", 4, Reserved, true}, {"f47ac10b-58cc-4372-5567-0e02b2c3d479", 4, Reserved, true}, {"f47ac10b-58cc-4372-6567-0e02b2c3d479", 4, Reserved, true}, {"f47ac10b-58cc-4372-7567-0e02b2c3d479", 4, Reserved, true}, {"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true}, {"f47ac10b-58cc-4372-9567-0e02b2c3d479", 4, RFC4122, true}, {"f47ac10b-58cc-4372-a567-0e02b2c3d479", 4, RFC4122, true}, {"f47ac10b-58cc-4372-b567-0e02b2c3d479", 4, RFC4122, true}, {"f47ac10b-58cc-4372-c567-0e02b2c3d479", 4, Microsoft, true}, {"f47ac10b-58cc-4372-d567-0e02b2c3d479", 4, Microsoft, true}, {"f47ac10b-58cc-4372-e567-0e02b2c3d479", 4, Future, true}, {"f47ac10b-58cc-4372-f567-0e02b2c3d479", 4, Future, true}, {"f47ac10b158cc-5372-a567-0e02b2c3d479", 0, Invalid, false}, {"f47ac10b-58cc25372-a567-0e02b2c3d479", 0, Invalid, false}, {"f47ac10b-58cc-53723a567-0e02b2c3d479", 0, Invalid, false}, {"f47ac10b-58cc-5372-a56740e02b2c3d479", 0, Invalid, false}, {"f47ac10b-58cc-5372-a567-0e02-2c3d479", 0, Invalid, false}, {"g47ac10b-58cc-4372-a567-0e02b2c3d479", 0, Invalid, false}, {"{f47ac10b-58cc-0372-8567-0e02b2c3d479}", 0, RFC4122, true}, {"{f47ac10b-58cc-0372-8567-0e02b2c3d479", 0, Invalid, false}, {"f47ac10b-58cc-0372-8567-0e02b2c3d479}", 0, Invalid, false}, {"f47ac10b58cc037285670e02b2c3d479", 0, RFC4122, true}, {"f47ac10b58cc037285670e02b2c3d4790", 0, Invalid, false}, {"f47ac10b58cc037285670e02b2c3d47", 0, Invalid, false}, {"01ee836c-e7c9-619d-929a-525400475911", 6, RFC4122, true}, {"018bd12c-58b0-7683-8a5b-8752d0e86651", 7, RFC4122, true}, } var constants = []struct { c interface{} name string }{ {Person, "Person"}, {Group, "Group"}, {Org, "Org"}, {Invalid, "Invalid"}, {RFC4122, "RFC4122"}, {Reserved, "Reserved"}, {Microsoft, "Microsoft"}, {Future, "Future"}, {Domain(17), "Domain17"}, {Variant(42), "BadVariant42"}, } func testTest(t *testing.T, in string, tt test) { uuid, err := Parse(in) if ok := (err == nil); ok != tt.isuuid { t.Errorf("Parse(%s) got %v expected %v\b", in, 
ok, tt.isuuid) } if err != nil { return } if v := uuid.Variant(); v != tt.variant { t.Errorf("Variant(%s) got %d expected %d\b", in, v, tt.variant) } if v := uuid.Version(); v != tt.version { t.Errorf("Version(%s) got %d expected %d\b", in, v, tt.version) } } func testBytes(t *testing.T, in []byte, tt test) { uuid, err := ParseBytes(in) if ok := (err == nil); ok != tt.isuuid { t.Errorf("ParseBytes(%s) got %v expected %v\b", in, ok, tt.isuuid) } if err != nil { return } suuid, _ := Parse(string(in)) if uuid != suuid { t.Errorf("ParseBytes(%s) got %v expected %v\b", in, uuid, suuid) } } func TestUUID(t *testing.T) { for _, tt := range tests { testTest(t, tt.in, tt) testTest(t, strings.ToUpper(tt.in), tt) testBytes(t, []byte(tt.in), tt) } } func TestFromBytes(t *testing.T) { b := []byte{ 0x7d, 0x44, 0x48, 0x40, 0x9d, 0xc0, 0x11, 0xd1, 0xb2, 0x45, 0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2, } uuid, err := FromBytes(b) if err != nil { t.Fatalf("%s", err) } for i := 0; i < len(uuid); i++ { if b[i] != uuid[i] { t.Fatalf("FromBytes() got %v expected %v\b", uuid[:], b) } } } func TestConstants(t *testing.T) { for x, tt := range constants { v, ok := tt.c.(fmt.Stringer) if !ok { t.Errorf("%x: %v: not a stringer", x, v) } else if s := v.String(); s != tt.name { v, _ := tt.c.(int) t.Errorf("%x: Constant %T:%d gives %q, expected %q", x, tt.c, v, s, tt.name) } } } func TestRandomUUID(t *testing.T) { m := make(map[string]bool) for x := 1; x < 32; x++ { uuid := New() s := uuid.String() if m[s] { t.Errorf("NewRandom returned duplicated UUID %s", s) } m[s] = true if v := uuid.Version(); v != 4 { t.Errorf("Random UUID of version %s", v) } if uuid.Variant() != RFC4122 { t.Errorf("Random UUID is variant %d", uuid.Variant()) } } } func TestRandomUUID_Pooled(t *testing.T) { defer DisableRandPool() EnableRandPool() m := make(map[string]bool) for x := 1; x < 128; x++ { uuid := New() s := uuid.String() if m[s] { t.Errorf("NewRandom returned duplicated UUID %s", s) } m[s] = true if v := uuid.Version(); v != 4 { t.Errorf("Random UUID of version %s", v) } if uuid.Variant() != RFC4122 { t.Errorf("Random UUID is variant %d", uuid.Variant()) } } } func TestNew(t *testing.T) { m := make(map[UUID]bool) for x := 1; x < 32; x++ { s := New() if m[s] { t.Errorf("New returned duplicated UUID %s", s) } m[s] = true uuid, err := Parse(s.String()) if err != nil { t.Errorf("New.String() returned %q which does not decode", s) continue } if v := uuid.Version(); v != 4 { t.Errorf("Random UUID of version %s", v) } if uuid.Variant() != RFC4122 { t.Errorf("Random UUID is variant %d", uuid.Variant()) } } } func TestClockSeq(t *testing.T) { // Fake time.Now for this test to return a monotonically advancing time; restore it at end. defer func(orig func() time.Time) { timeNow = orig }(timeNow) monTime := time.Now() timeNow = func() time.Time { monTime = monTime.Add(1 * time.Second) return monTime } SetClockSequence(-1) uuid1, err := NewUUID() if err != nil { t.Fatalf("could not create UUID: %v", err) } uuid2, err := NewUUID() if err != nil { t.Fatalf("could not create UUID: %v", err) } if s1, s2 := uuid1.ClockSequence(), uuid2.ClockSequence(); s1 != s2 { t.Errorf("clock sequence %d != %d", s1, s2) } SetClockSequence(-1) uuid2, err = NewUUID() if err != nil { t.Fatalf("could not create UUID: %v", err) } // Just on the very off chance we generated the same sequence // two times we try again. 
if uuid1.ClockSequence() == uuid2.ClockSequence() { SetClockSequence(-1) uuid2, err = NewUUID() if err != nil { t.Fatalf("could not create UUID: %v", err) } } if s1, s2 := uuid1.ClockSequence(), uuid2.ClockSequence(); s1 == s2 { t.Errorf("Duplicate clock sequence %d", s1) } SetClockSequence(0x1234) uuid1, err = NewUUID() if err != nil { t.Fatalf("could not create UUID: %v", err) } if seq := uuid1.ClockSequence(); seq != 0x1234 { t.Errorf("%s: expected seq 0x1234 got 0x%04x", uuid1, seq) } } func TestCoding(t *testing.T) { text := "7d444840-9dc0-11d1-b245-5ffdce74fad2" urn := "urn:uuid:7d444840-9dc0-11d1-b245-5ffdce74fad2" data := UUID{ 0x7d, 0x44, 0x48, 0x40, 0x9d, 0xc0, 0x11, 0xd1, 0xb2, 0x45, 0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2, } if v := data.String(); v != text { t.Errorf("%x: encoded to %s, expected %s", data, v, text) } if v := data.URN(); v != urn { t.Errorf("%x: urn is %s, expected %s", data, v, urn) } uuid, err := Parse(text) if err != nil { t.Errorf("Parse returned unexpected error %v", err) } if data != uuid { t.Errorf("%s: decoded to %s, expected %s", text, uuid, data) } } func TestVersion1(t *testing.T) { uuid1, err := NewUUID() if err != nil { t.Fatalf("could not create UUID: %v", err) } uuid2, err := NewUUID() if err != nil { t.Fatalf("could not create UUID: %v", err) } if uuid1 == uuid2 { t.Errorf("%s:duplicate uuid", uuid1) } if v := uuid1.Version(); v != 1 { t.Errorf("%s: version %s expected 1", uuid1, v) } if v := uuid2.Version(); v != 1 { t.Errorf("%s: version %s expected 1", uuid2, v) } n1 := uuid1.NodeID() n2 := uuid2.NodeID() if !bytes.Equal(n1, n2) { t.Errorf("Different nodes %x != %x", n1, n2) } t1 := uuid1.Time() t2 := uuid2.Time() q1 := uuid1.ClockSequence() q2 := uuid2.ClockSequence() switch { case t1 == t2 && q1 == q2: t.Error("time stopped") case t1 > t2 && q1 == q2: t.Error("time reversed") case t1 < t2 && q1 != q2: t.Error("clock sequence changed unexpectedly") } } func TestNode(t *testing.T) { // This test is mostly to make sure we don't leave nodeMu locked. 
ifname = "" if ni := NodeInterface(); ni != "" { t.Errorf("NodeInterface got %q, want %q", ni, "") } if SetNodeInterface("xyzzy") { t.Error("SetNodeInterface succeeded on a bad interface name") } if !SetNodeInterface("") { t.Error("SetNodeInterface failed") } if runtime.GOARCH != "js" { if ni := NodeInterface(); ni == "" { t.Error("NodeInterface returned an empty string") } } ni := NodeID() if len(ni) != 6 { t.Errorf("ni got %d bytes, want 6", len(ni)) } hasData := false for _, b := range ni { if b != 0 { hasData = true } } if !hasData { t.Error("nodeid is all zeros") } id := []byte{1, 2, 3, 4, 5, 6, 7, 8} SetNodeID(id) ni = NodeID() if !bytes.Equal(ni, id[:6]) { t.Errorf("got nodeid %v, want %v", ni, id[:6]) } if ni := NodeInterface(); ni != "user" { t.Errorf("got interface %q, want %q", ni, "user") } } func TestNodeAndTime(t *testing.T) { // Time is February 5, 1998 12:30:23.136364800 AM GMT uuid, err := Parse("7d444840-9dc0-11d1-b245-5ffdce74fad2") if err != nil { t.Fatalf("Parser returned unexpected error %v", err) } node := []byte{0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2} ts := uuid.Time() c := time.Unix(ts.UnixTime()) want := time.Date(1998, 2, 5, 0, 30, 23, 136364800, time.UTC) if !c.Equal(want) { t.Errorf("Got time %v, want %v", c, want) } if !bytes.Equal(node, uuid.NodeID()) { t.Errorf("Expected node %v got %v", node, uuid.NodeID()) } } func TestMD5(t *testing.T) { uuid := NewMD5(NameSpaceDNS, []byte("python.org")).String() want := "6fa459ea-ee8a-3ca4-894e-db77e160355e" if uuid != want { t.Errorf("MD5: got %q expected %q", uuid, want) } } func TestSHA1(t *testing.T) { uuid := NewSHA1(NameSpaceDNS, []byte("python.org")).String() want := "886313e1-3b8a-5372-9b90-0c9aee199e5d" if uuid != want { t.Errorf("SHA1: got %q expected %q", uuid, want) } } func TestNodeID(t *testing.T) { nid := []byte{1, 2, 3, 4, 5, 6} SetNodeInterface("") s := NodeInterface() if runtime.GOARCH != "js" { if s == "" || s == "user" { t.Errorf("NodeInterface %q after SetInterface", s) } } node1 := NodeID() if node1 == nil { t.Error("NodeID nil after SetNodeInterface", s) } SetNodeID(nid) s = NodeInterface() if s != "user" { t.Errorf("Expected NodeInterface %q got %q", "user", s) } node2 := NodeID() if node2 == nil { t.Error("NodeID nil after SetNodeID", s) } if bytes.Equal(node1, node2) { t.Error("NodeID not changed after SetNodeID", s) } else if !bytes.Equal(nid, node2) { t.Errorf("NodeID is %x, expected %x", node2, nid) } } func testDCE(t *testing.T, name string, uuid UUID, err error, domain Domain, id uint32) { if err != nil { t.Errorf("%s failed: %v", name, err) return } if v := uuid.Version(); v != 2 { t.Errorf("%s: %s: expected version 2, got %s", name, uuid, v) return } if v := uuid.Domain(); v != domain { t.Errorf("%s: %s: expected domain %d, got %d", name, uuid, domain, v) } if v := uuid.ID(); v != id { t.Errorf("%s: %s: expected id %d, got %d", name, uuid, id, v) } } func TestDCE(t *testing.T) { uuid, err := NewDCESecurity(42, 12345678) testDCE(t, "NewDCESecurity", uuid, err, 42, 12345678) uuid, err = NewDCEPerson() testDCE(t, "NewDCEPerson", uuid, err, Person, uint32(os.Getuid())) uuid, err = NewDCEGroup() testDCE(t, "NewDCEGroup", uuid, err, Group, uint32(os.Getgid())) } type badRand struct{} func (r badRand) Read(buf []byte) (int, error) { for i := range buf { buf[i] = byte(i) } return len(buf), nil } func TestBadRand(t *testing.T) { SetRand(badRand{}) uuid1 := New() uuid2 := New() if uuid1 != uuid2 { t.Errorf("expected duplicates, got %q and %q", uuid1, uuid2) } SetRand(nil) uuid1 = New() uuid2 = New() if 
uuid1 == uuid2 { t.Errorf("unexpected duplicates, got %q", uuid1) } } func TestSetRand(t *testing.T) { myString := "805-9dd6-1a877cb526c678e71d38-7122-44c0-9b7c-04e7001cc78783ac3e82-47a3-4cc3-9951-13f3339d88088f5d685a-11f7-4078-ada9-de44ad2daeb7" SetRand(strings.NewReader(myString)) uuid1 := New() uuid2 := New() SetRand(strings.NewReader(myString)) uuid3 := New() uuid4 := New() if uuid1 != uuid3 { t.Errorf("expected duplicates, got %q and %q", uuid1, uuid3) } if uuid2 != uuid4 { t.Errorf("expected duplicates, got %q and %q", uuid2, uuid4) } } func TestRandomFromReader(t *testing.T) { myString := "8059ddhdle77cb52" r := bytes.NewReader([]byte(myString)) r2 := bytes.NewReader([]byte(myString)) uuid1, err := NewRandomFromReader(r) if err != nil { t.Errorf("failed generating UUID from a reader") } _, err = NewRandomFromReader(r) if err == nil { t.Errorf("expecting an error as reader has no more bytes. Got uuid. NewRandomFromReader may not be using the provided reader") } uuid3, err := NewRandomFromReader(r2) if err != nil { t.Errorf("failed generating UUID from a reader") } if uuid1 != uuid3 { t.Errorf("expected duplicates, got %q and %q", uuid1, uuid3) } } func TestRandPool(t *testing.T) { myString := "8059ddhdle77cb52" EnableRandPool() SetRand(strings.NewReader(myString)) _, err := NewRandom() if err == nil { t.Errorf("expecting an error as reader has no more bytes") } DisableRandPool() SetRand(strings.NewReader(myString)) _, err = NewRandom() if err != nil { t.Errorf("failed generating UUID from a reader") } } func TestWrongLength(t *testing.T) { _, err := Parse("12345") if err == nil { t.Errorf("expected ‘12345’ was invalid") } else if err.Error() != "invalid UUID length: 5" { t.Errorf("expected a different error message for an invalid length") } } func TestIsWrongLength(t *testing.T) { _, err := Parse("12345") if !IsInvalidLengthError(err) { t.Errorf("expected error type is invalidLengthError") } } func FuzzParse(f *testing.F) { for _, tt := range tests { f.Add(tt.in) f.Add(strings.ToUpper(tt.in)) } f.Fuzz(func(t *testing.T, in string) { Parse(in) }) } func FuzzParseBytes(f *testing.F) { for _, tt := range tests { f.Add([]byte(tt.in)) } f.Fuzz(func(t *testing.T, in []byte) { ParseBytes(in) }) } func FuzzFromBytes(f *testing.F) { // Copied from TestFromBytes. 
f.Add([]byte{ 0x7d, 0x44, 0x48, 0x40, 0x9d, 0xc0, 0x11, 0xd1, 0xb2, 0x45, 0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2, }) f.Fuzz(func(t *testing.T, in []byte) { FromBytes(in) }) } // TestValidate checks various scenarios for the Validate function func TestValidate(t *testing.T) { testCases := []struct { name string input string expect error }{ {"Valid UUID", "123e4567-e89b-12d3-a456-426655440000", nil}, {"Valid UUID with URN", "urn:uuid:123e4567-e89b-12d3-a456-426655440000", nil}, {"Valid UUID with Braces", "{123e4567-e89b-12d3-a456-426655440000}", nil}, {"Valid UUID No Hyphens", "123e4567e89b12d3a456426655440000", nil}, {"Invalid UUID", "invalid-uuid", errors.New("invalid UUID length: 12")}, {"Invalid Length", "123", fmt.Errorf("invalid UUID length: %d", len("123"))}, {"Invalid URN Prefix", "urn:test:123e4567-e89b-12d3-a456-426655440000", fmt.Errorf("invalid urn prefix: %q", "urn:test:")}, {"Invalid Brackets", "[123e4567-e89b-12d3-a456-426655440000]", fmt.Errorf("invalid bracketed UUID format")}, {"Invalid UUID Format", "12345678gabc1234abcd1234abcd1234", fmt.Errorf("invalid UUID format")}, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { err := Validate(tc.input) if (err != nil) != (tc.expect != nil) || (err != nil && err.Error() != tc.expect.Error()) { t.Errorf("Validate(%q) = %v, want %v", tc.input, err, tc.expect) } }) } } var asString = "f47ac10b-58cc-0372-8567-0e02b2c3d479" var asBytes = []byte(asString) func BenchmarkParse(b *testing.B) { for i := 0; i < b.N; i++ { _, err := Parse(asString) if err != nil { b.Fatal(err) } } } func BenchmarkParseBytes(b *testing.B) { for i := 0; i < b.N; i++ { _, err := ParseBytes(asBytes) if err != nil { b.Fatal(err) } } } // parseBytesUnsafe is to benchmark using unsafe. func parseBytesUnsafe(b []byte) (UUID, error) { return Parse(*(*string)(unsafe.Pointer(&b))) } func BenchmarkParseBytesUnsafe(b *testing.B) { for i := 0; i < b.N; i++ { _, err := parseBytesUnsafe(asBytes) if err != nil { b.Fatal(err) } } } // parseBytesCopy is to benchmark not using unsafe. 
func parseBytesCopy(b []byte) (UUID, error) { return Parse(string(b)) } func BenchmarkParseBytesCopy(b *testing.B) { for i := 0; i < b.N; i++ { _, err := parseBytesCopy(asBytes) if err != nil { b.Fatal(err) } } } func BenchmarkNew(b *testing.B) { for i := 0; i < b.N; i++ { New() } } func BenchmarkUUID_String(b *testing.B) { uuid, err := Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479") if err != nil { b.Fatal(err) } for i := 0; i < b.N; i++ { if uuid.String() == "" { b.Fatal("invalid uuid") } } } func BenchmarkUUID_URN(b *testing.B) { uuid, err := Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479") if err != nil { b.Fatal(err) } for i := 0; i < b.N; i++ { if uuid.URN() == "" { b.Fatal("invalid uuid") } } } func BenchmarkParseBadLength(b *testing.B) { short := asString[:10] for i := 0; i < b.N; i++ { _, err := Parse(short) if err == nil { b.Fatalf("expected ‘%s’ was invalid", short) } } } func BenchmarkParseLen32Truncated(b *testing.B) { partial := asString[:len(asString)-4] for i := 0; i < b.N; i++ { _, err := Parse(partial) if err == nil { b.Fatalf("expected ‘%s’ was invalid", partial) } } } func BenchmarkParseLen36Corrupted(b *testing.B) { wrong := asString[:len(asString)-1] + "x" for i := 0; i < b.N; i++ { _, err := Parse(wrong) if err == nil { b.Fatalf("expected ‘%s’ was invalid", wrong) } } } func BenchmarkUUID_New(b *testing.B) { b.RunParallel(func(pb *testing.PB) { for pb.Next() { _, err := NewRandom() if err != nil { b.Fatal(err) } } }) } func BenchmarkUUID_NewPooled(b *testing.B) { EnableRandPool() b.RunParallel(func(pb *testing.PB) { for pb.Next() { _, err := NewRandom() if err != nil { b.Fatal(err) } } }) } func BenchmarkUUIDs_Strings(b *testing.B) { uuid1, err := Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479") if err != nil { b.Fatal(err) } uuid2, err := Parse("7d444840-9dc0-11d1-b245-5ffdce74fad2") if err != nil { b.Fatal(err) } uuids := UUIDs{uuid1, uuid2} for i := 0; i < b.N; i++ { uuids.Strings() } } func TestVersion6(t *testing.T) { uuid1, err := NewV6() if err != nil { t.Fatalf("could not create UUID: %v", err) } uuid2, err := NewV6() if err != nil { t.Fatalf("could not create UUID: %v", err) } if uuid1 == uuid2 { t.Errorf("%s:duplicate uuid", uuid1) } if v := uuid1.Version(); v != 6 { t.Errorf("%s: version %s expected 6", uuid1, v) } if v := uuid2.Version(); v != 6 { t.Errorf("%s: version %s expected 6", uuid2, v) } n1 := uuid1.NodeID() n2 := uuid2.NodeID() if !bytes.Equal(n1, n2) { t.Errorf("Different nodes %x != %x", n1, n2) } t1 := uuid1.Time() t2 := uuid2.Time() q1 := uuid1.ClockSequence() q2 := uuid2.ClockSequence() switch { case t1 == t2 && q1 == q2: t.Error("time stopped") case t1 > t2 && q1 == q2: t.Error("time reversed") case t1 < t2 && q1 != q2: t.Error("clock sequence changed unexpectedly") } } // uuid v7 time is only unix milliseconds, so // uuid1.Time() == uuid2.Time() is right, but uuid1 must != uuid2 func TestVersion7(t *testing.T) { SetRand(nil) m := make(map[string]bool) for x := 1; x < 128; x++ { uuid, err := NewV7() if err != nil { t.Fatalf("could not create UUID: %v", err) } s := uuid.String() if m[s] { t.Errorf("NewV7 returned duplicated UUID %s", s) } m[s] = true if v := uuid.Version(); v != 7 { t.Errorf("UUID of version %s", v) } if uuid.Variant() != RFC4122 { t.Errorf("UUID is variant %d", uuid.Variant()) } } } // uuid v7 time is only unix milliseconds, so // uuid1.Time() == uuid2.Time() is right, but uuid1 must != uuid2 func TestVersion7_pooled(t *testing.T) { SetRand(nil) EnableRandPool() defer DisableRandPool() m := make(map[string]bool) for x := 1; x < 
128; x++ { uuid, err := NewV7() if err != nil { t.Fatalf("could not create UUID: %v", err) } s := uuid.String() if m[s] { t.Errorf("NewV7 returned duplicated UUID %s", s) } m[s] = true if v := uuid.Version(); v != 7 { t.Errorf("UUID of version %s", v) } if uuid.Variant() != RFC4122 { t.Errorf("UUID is variant %d", uuid.Variant()) } } } func TestVersion7FromReader(t *testing.T) { myString := "8059ddhdle77cb52" r := bytes.NewReader([]byte(myString)) _, err := NewV7FromReader(r) if err != nil { t.Errorf("failed generating UUID from a reader") } _, err = NewV7FromReader(r) if err == nil { t.Errorf("expecting an error as reader has no more bytes. Got uuid. NewV7FromReader may not be using the provided reader") } } func TestVersion7Monotonicity(t *testing.T) { length := 10000 u1 := Must(NewV7()).String() for i := 0; i < length; i++ { u2 := Must(NewV7()).String() if u2 <= u1 { t.Errorf("monotonicity failed at #%d: %s(next) < %s(before)", i, u2, u1) break } u1 = u2 } } type fakeRand struct{} func (g fakeRand) Read(bs []byte) (int, error) { for i, _ := range bs { bs[i] = 0x88 } return len(bs), nil } func TestVersion7MonotonicityStrict(t *testing.T) { timeNow = func() time.Time { return time.Date(2008, 8, 8, 8, 8, 8, 8, time.UTC) } defer func() { timeNow = time.Now }() SetRand(fakeRand{}) defer SetRand(nil) length := 100000 // > 3906 u1 := Must(NewV7()).String() for i := 0; i < length; i++ { u2 := Must(NewV7()).String() if u2 <= u1 { t.Errorf("monotonicity failed at #%d: %s(next) < %s(before)", i, u2, u1) break } u1 = u2 } } dependencies/pkg/mod/github.com/google/uuid@v1.6.0/sql.go0000644000000000000000000000266315024302467022034 0ustar rootroot// Copyright 2016 Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package uuid import ( "database/sql/driver" "fmt" ) // Scan implements sql.Scanner so UUIDs can be read from databases transparently. // Currently, database types that map to string and []byte are supported. Please // consult database-specific driver documentation for matching types. func (uuid *UUID) Scan(src interface{}) error { switch src := src.(type) { case nil: return nil case string: // if an empty UUID comes from a table, we return a null UUID if src == "" { return nil } // see Parse for required string format u, err := Parse(src) if err != nil { return fmt.Errorf("Scan: %v", err) } *uuid = u case []byte: // if an empty UUID comes from a table, we return a null UUID if len(src) == 0 { return nil } // assumes a simple slice of bytes if 16 bytes // otherwise attempts to parse if len(src) != 16 { return uuid.Scan(string(src)) } copy((*uuid)[:], src) default: return fmt.Errorf("Scan: unable to scan type %T into UUID", src) } return nil } // Value implements sql.Valuer so that UUIDs can be written to databases // transparently. Currently, UUIDs map to strings. Please consult // database-specific driver documentation for matching types. func (uuid UUID) Value() (driver.Value, error) { return uuid.String(), nil } dependencies/pkg/mod/github.com/google/uuid@v1.6.0/marshal.go0000644000000000000000000000161315024302467022656 0ustar rootroot// Copyright 2016 Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package uuid import "fmt" // MarshalText implements encoding.TextMarshaler. 
func (uuid UUID) MarshalText() ([]byte, error) { var js [36]byte encodeHex(js[:], uuid) return js[:], nil } // UnmarshalText implements encoding.TextUnmarshaler. func (uuid *UUID) UnmarshalText(data []byte) error { id, err := ParseBytes(data) if err != nil { return err } *uuid = id return nil } // MarshalBinary implements encoding.BinaryMarshaler. func (uuid UUID) MarshalBinary() ([]byte, error) { return uuid[:], nil } // UnmarshalBinary implements encoding.BinaryUnmarshaler. func (uuid *UUID) UnmarshalBinary(data []byte) error { if len(data) != 16 { return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) } copy(uuid[:], data) return nil } dependencies/pkg/mod/github.com/google/uuid@v1.6.0/go.mod0000644000000000000000000000003615024302467022004 0ustar rootrootmodule github.com/google/uuid dependencies/pkg/mod/github.com/google/uuid@v1.6.0/doc.go0000644000000000000000000000062715024302467022000 0ustar rootroot// Copyright 2016 Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package uuid generates and inspects UUIDs. // // UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security // Services. // // A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to // maps or compared directly. package uuid dependencies/pkg/mod/github.com/google/uuid@v1.6.0/time.go0000644000000000000000000000732315024302467022171 0ustar rootroot// Copyright 2016 Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package uuid import ( "encoding/binary" "sync" "time" ) // A Time represents a time as the number of 100's of nanoseconds since 15 Oct // 1582. type Time int64 const ( lillian = 2299160 // Julian day of 15 Oct 1582 unix = 2440587 // Julian day of 1 Jan 1970 epoch = unix - lillian // Days between epochs g1582 = epoch * 86400 // seconds between epochs g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs ) var ( timeMu sync.Mutex lasttime uint64 // last time we returned clockSeq uint16 // clock sequence for this run timeNow = time.Now // for testing ) // UnixTime converts t the number of seconds and nanoseconds using the Unix // epoch of 1 Jan 1970. func (t Time) UnixTime() (sec, nsec int64) { sec = int64(t - g1582ns100) nsec = (sec % 10000000) * 100 sec /= 10000000 return sec, nsec } // GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and // clock sequence as well as adjusting the clock sequence as needed. An error // is returned if the current time cannot be determined. func GetTime() (Time, uint16, error) { defer timeMu.Unlock() timeMu.Lock() return getTime() } func getTime() (Time, uint16, error) { t := timeNow() // If we don't have a clock sequence already, set one. if clockSeq == 0 { setClockSequence(-1) } now := uint64(t.UnixNano()/100) + g1582ns100 // If time has gone backwards with this clock sequence then we // increment the clock sequence if now <= lasttime { clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 } lasttime = now return Time(now), clockSeq, nil } // ClockSequence returns the current clock sequence, generating one if not // already set. The clock sequence is only used for Version 1 UUIDs. // // The uuid package does not use global static storage for the clock sequence or // the last time a UUID was generated. 
Unless SetClockSequence is used, a new // random clock sequence is generated the first time a clock sequence is // requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) func ClockSequence() int { defer timeMu.Unlock() timeMu.Lock() return clockSequence() } func clockSequence() int { if clockSeq == 0 { setClockSequence(-1) } return int(clockSeq & 0x3fff) } // SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to // -1 causes a new sequence to be generated. func SetClockSequence(seq int) { defer timeMu.Unlock() timeMu.Lock() setClockSequence(seq) } func setClockSequence(seq int) { if seq == -1 { var b [2]byte randomBits(b[:]) // clock sequence seq = int(b[0])<<8 | int(b[1]) } oldSeq := clockSeq clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant if oldSeq != clockSeq { lasttime = 0 } } // Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in // uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs. func (uuid UUID) Time() Time { var t Time switch uuid.Version() { case 6: time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110 t = Time(time) case 7: time := binary.BigEndian.Uint64(uuid[:8]) t = Time((time>>16)*10000 + g1582ns100) default: // forward compatible time := int64(binary.BigEndian.Uint32(uuid[0:4])) time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 t = Time(time) } return t } // ClockSequence returns the clock sequence encoded in uuid. // The clock sequence is only well defined for version 1 and 2 UUIDs. func (uuid UUID) ClockSequence() int { return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff } dependencies/pkg/mod/github.com/google/uuid@v1.6.0/README.md0000644000000000000000000000150715024302467022161 0ustar rootroot# uuid The uuid package generates and inspects UUIDs based on [RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122) and DCE 1.1: Authentication and Security Services. This package is based on the github.com/pborman/uuid package (previously named code.google.com/p/go-uuid). It differs from these earlier packages in that a UUID is a 16 byte array rather than a byte slice. One loss due to this change is the ability to represent an invalid UUID (vs a NIL UUID). ###### Install ```sh go get github.com/google/uuid ``` ###### Documentation [![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid) Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here: http://pkg.go.dev/github.com/google/uuid dependencies/pkg/mod/github.com/google/uuid@v1.6.0/null.go0000644000000000000000000000463515024302467022210 0ustar rootroot// Copyright 2021 Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package uuid import ( "bytes" "database/sql/driver" "encoding/json" "fmt" ) var jsonNull = []byte("null") // NullUUID represents a UUID that may be null. // NullUUID implements the SQL driver.Scanner interface so // it can be used as a scan destination: // // var u uuid.NullUUID // err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u) // ... // if u.Valid { // // use u.UUID // } else { // // NULL value // } // type NullUUID struct { UUID UUID Valid bool // Valid is true if UUID is not NULL } // Scan implements the SQL driver.Scanner interface. 
func (nu *NullUUID) Scan(value interface{}) error { if value == nil { nu.UUID, nu.Valid = Nil, false return nil } err := nu.UUID.Scan(value) if err != nil { nu.Valid = false return err } nu.Valid = true return nil } // Value implements the driver Valuer interface. func (nu NullUUID) Value() (driver.Value, error) { if !nu.Valid { return nil, nil } // Delegate to UUID Value function return nu.UUID.Value() } // MarshalBinary implements encoding.BinaryMarshaler. func (nu NullUUID) MarshalBinary() ([]byte, error) { if nu.Valid { return nu.UUID[:], nil } return []byte(nil), nil } // UnmarshalBinary implements encoding.BinaryUnmarshaler. func (nu *NullUUID) UnmarshalBinary(data []byte) error { if len(data) != 16 { return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) } copy(nu.UUID[:], data) nu.Valid = true return nil } // MarshalText implements encoding.TextMarshaler. func (nu NullUUID) MarshalText() ([]byte, error) { if nu.Valid { return nu.UUID.MarshalText() } return jsonNull, nil } // UnmarshalText implements encoding.TextUnmarshaler. func (nu *NullUUID) UnmarshalText(data []byte) error { id, err := ParseBytes(data) if err != nil { nu.Valid = false return err } nu.UUID = id nu.Valid = true return nil } // MarshalJSON implements json.Marshaler. func (nu NullUUID) MarshalJSON() ([]byte, error) { if nu.Valid { return json.Marshal(nu.UUID) } return jsonNull, nil } // UnmarshalJSON implements json.Unmarshaler. func (nu *NullUUID) UnmarshalJSON(data []byte) error { if bytes.Equal(data, jsonNull) { *nu = NullUUID{} return nil // valid null UUID } err := json.Unmarshal(data, &nu.UUID) nu.Valid = err == nil return err } dependencies/pkg/mod/github.com/google/uuid@v1.6.0/CONTRIBUTORS0000644000000000000000000000015115024302467022554 0ustar rootrootPaul Borman bmatsuo shawnps theory jboverfelt dsymonds cd1 wallclockbuilder dansouza dependencies/pkg/mod/github.com/google/uuid@v1.6.0/util.go0000644000000000000000000000360015024302467022202 0ustar rootroot// Copyright 2016 Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package uuid import ( "io" ) // randomBits completely fills slice b with random data. func randomBits(b []byte) { if _, err := io.ReadFull(rander, b); err != nil { panic(err.Error()) // rand should never fail } } // xvalues returns the value of a byte as a hexadecimal digit or 255. 
var xvalues = [256]byte{ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, } // xtob converts hex characters x1 and x2 into a byte. func xtob(x1, x2 byte) (byte, bool) { b1 := xvalues[x1] b2 := xvalues[x2] return (b1 << 4) | b2, b1 != 255 && b2 != 255 } dependencies/pkg/mod/github.com/google/uuid@v1.6.0/sql_test.go0000644000000000000000000000451015024302467023064 0ustar rootroot// Copyright 2016 Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package uuid import ( "strings" "testing" ) func TestScan(t *testing.T) { stringTest := "f47ac10b-58cc-0372-8567-0e02b2c3d479" badTypeTest := 6 invalidTest := "f47ac10b-58cc-0372-8567-0e02b2c3d4" byteTest := make([]byte, 16) byteTestUUID := Must(Parse(stringTest)) copy(byteTest, byteTestUUID[:]) // sunny day tests var uuid UUID err := (&uuid).Scan(stringTest) if err != nil { t.Fatal(err) } err = (&uuid).Scan([]byte(stringTest)) if err != nil { t.Fatal(err) } err = (&uuid).Scan(byteTest) if err != nil { t.Fatal(err) } // bad type tests err = (&uuid).Scan(badTypeTest) if err == nil { t.Error("int correctly parsed and shouldn't have") } if !strings.Contains(err.Error(), "unable to scan type") { t.Error("attempting to parse an int returned an incorrect error message") } // invalid/incomplete uuids err = (&uuid).Scan(invalidTest) if err == nil { t.Error("invalid uuid was parsed without error") } if !strings.Contains(err.Error(), "invalid UUID") { t.Error("attempting to parse an invalid UUID returned an incorrect error message") } err = (&uuid).Scan(byteTest[:len(byteTest)-2]) if err == nil { t.Error("invalid byte uuid was parsed without error") } if !strings.Contains(err.Error(), "invalid UUID") { t.Error("attempting to parse an invalid byte UUID returned an incorrect error message") } // empty tests uuid = UUID{} var emptySlice []byte err = (&uuid).Scan(emptySlice) if err != nil { t.Fatal(err) } for _, v := range uuid { if v != 0 { t.Error("UUID was not nil after scanning empty byte slice") } } uuid = UUID{} var emptyString string err = (&uuid).Scan(emptyString) if err != nil { t.Fatal(err) } for _, v := range uuid { if v != 0 { t.Error("UUID was not nil after scanning empty byte slice") } } uuid = UUID{} err = (&uuid).Scan(nil) if err != nil { t.Fatal(err) } for _, v := range uuid { if v != 0 
{ t.Error("UUID was not nil after scanning nil") } } } func TestValue(t *testing.T) { stringTest := "f47ac10b-58cc-0372-8567-0e02b2c3d479" uuid := Must(Parse(stringTest)) val, _ := uuid.Value() if val != stringTest { t.Error("Value() did not return expected string") } } dependencies/pkg/mod/github.com/google/uuid@v1.6.0/node.go0000644000000000000000000000442315024302467022156 0ustar rootroot// Copyright 2016 Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package uuid import ( "sync" ) var ( nodeMu sync.Mutex ifname string // name of interface being used nodeID [6]byte // hardware for version 1 UUIDs zeroID [6]byte // nodeID with only 0's ) // NodeInterface returns the name of the interface from which the NodeID was // derived. The interface "user" is returned if the NodeID was set by // SetNodeID. func NodeInterface() string { defer nodeMu.Unlock() nodeMu.Lock() return ifname } // SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. // If name is "" then the first usable interface found will be used or a random // Node ID will be generated. If a named interface cannot be found then false // is returned. // // SetNodeInterface never fails when name is "". func SetNodeInterface(name string) bool { defer nodeMu.Unlock() nodeMu.Lock() return setNodeInterface(name) } func setNodeInterface(name string) bool { iname, addr := getHardwareInterface(name) // null implementation for js if iname != "" && addr != nil { ifname = iname copy(nodeID[:], addr) return true } // We found no interfaces with a valid hardware address. If name // does not specify a specific interface generate a random Node ID // (section 4.1.6) if name == "" { ifname = "random" randomBits(nodeID[:]) return true } return false } // NodeID returns a slice of a copy of the current Node ID, setting the Node ID // if not already set. func NodeID() []byte { defer nodeMu.Unlock() nodeMu.Lock() if nodeID == zeroID { setNodeInterface("") } nid := nodeID return nid[:] } // SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes // of id are used. If id is less than 6 bytes then false is returned and the // Node ID is not set. func SetNodeID(id []byte) bool { if len(id) < 6 { return false } defer nodeMu.Unlock() nodeMu.Lock() copy(nodeID[:], id) ifname = "user" return true } // NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is // not valid. The NodeID is only well defined for version 1 and 2 UUIDs. func (uuid UUID) NodeID() []byte { var node [6]byte copy(node[:], uuid[10:]) return node[:] } dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version7.go0000644000000000000000000000645315024302467023012 0ustar rootroot// Copyright 2023 Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package uuid import ( "io" ) // UUID version 7 features a time-ordered value field derived from the widely // implemented and well known Unix Epoch timestamp source, // the number of milliseconds seconds since midnight 1 Jan 1970 UTC, leap seconds excluded. // As well as improved entropy characteristics over versions 1 or 6. // // see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7 // // Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible. // // NewV7 returns a Version 7 UUID based on the current time(Unix Epoch). 
// Uses the randomness pool if it was enabled with EnableRandPool. // On error, NewV7 returns Nil and an error func NewV7() (UUID, error) { uuid, err := NewRandom() if err != nil { return uuid, err } makeV7(uuid[:]) return uuid, nil } // NewV7FromReader returns a Version 7 UUID based on the current time(Unix Epoch). // it use NewRandomFromReader fill random bits. // On error, NewV7FromReader returns Nil and an error. func NewV7FromReader(r io.Reader) (UUID, error) { uuid, err := NewRandomFromReader(r) if err != nil { return uuid, err } makeV7(uuid[:]) return uuid, nil } // makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6]) // uuid[8] already has the right version number (Variant is 10) // see function NewV7 and NewV7FromReader func makeV7(uuid []byte) { /* 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | unix_ts_ms | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | unix_ts_ms | ver | rand_a (12 bit seq) | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |var| rand_b | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | rand_b | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ _ = uuid[15] // bounds check t, s := getV7Time() uuid[0] = byte(t >> 40) uuid[1] = byte(t >> 32) uuid[2] = byte(t >> 24) uuid[3] = byte(t >> 16) uuid[4] = byte(t >> 8) uuid[5] = byte(t) uuid[6] = 0x70 | (0x0F & byte(s>>8)) uuid[7] = byte(s) } // lastV7time is the last time we returned stored as: // // 52 bits of time in milliseconds since epoch // 12 bits of (fractional nanoseconds) >> 8 var lastV7time int64 const nanoPerMilli = 1000000 // getV7Time returns the time in milliseconds and nanoseconds / 256. // The returned (milli << 12 + seq) is guarenteed to be greater than // (milli << 12 + seq) returned by any previous call to getV7Time. func getV7Time() (milli, seq int64) { timeMu.Lock() defer timeMu.Unlock() nano := timeNow().UnixNano() milli = nano / nanoPerMilli // Sequence number is between 0 and 3906 (nanoPerMilli>>8) seq = (nano - milli*nanoPerMilli) >> 8 now := milli<<12 + seq if now <= lastV7time { now = lastV7time + 1 milli = now >> 12 seq = now & 0xfff } lastV7time = now return milli, seq } dependencies/pkg/mod/github.com/google/uuid@v1.6.0/seq_test.go0000644000000000000000000000264315024302467023062 0ustar rootroot// Copyright 2016 Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package uuid import ( "flag" "runtime" "testing" "time" ) // This test is only run when --regressions is passed on the go test line. var regressions = flag.Bool("regressions", false, "run uuid regression tests") // TestClockSeqRace tests for a particular race condition of returning two // identical Version1 UUIDs. The duration of 1 minute was chosen as the race // condition, before being fixed, nearly always occurred in under 30 seconds. func TestClockSeqRace(t *testing.T) { if !*regressions { t.Skip("skipping regression tests") } duration := time.Minute done := make(chan struct{}) defer close(done) ch := make(chan UUID, 10000) ncpu := runtime.NumCPU() switch ncpu { case 0, 1: // We can't run the test effectively. 
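// With zero or one usable CPU there is no real concurrency, so the
// clock-sequence race this regression test provokes cannot occur; skip
// rather than report a meaningless pass.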
t.Skip("skipping race test, only one CPU detected") return default: runtime.GOMAXPROCS(ncpu) } for i := 0; i < ncpu; i++ { go func() { for { select { case <-done: return case ch <- Must(NewUUID()): } } }() } uuids := make(map[string]bool) cnt := 0 start := time.Now() for u := range ch { s := u.String() if uuids[s] { t.Errorf("duplicate uuid after %d in %v: %s", cnt, time.Since(start), s) return } uuids[s] = true if time.Since(start) > duration { return } cnt++ } } dependencies/pkg/mod/github.com/google/uuid@v1.6.0/null_test.go0000644000000000000000000001166115024302467023244 0ustar rootroot// Copyright 2021 Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package uuid import ( "bytes" "encoding/json" "testing" ) func TestNullUUIDScan(t *testing.T) { var u UUID var nu NullUUID uNilErr := u.Scan(nil) nuNilErr := nu.Scan(nil) if uNilErr != nil && nuNilErr != nil && uNilErr.Error() != nuNilErr.Error() { t.Errorf("expected errors to be equal, got %s, %s", uNilErr, nuNilErr) } uInvalidStringErr := u.Scan("test") nuInvalidStringErr := nu.Scan("test") if uInvalidStringErr != nil && nuInvalidStringErr != nil && uInvalidStringErr.Error() != nuInvalidStringErr.Error() { t.Errorf("expected errors to be equal, got %s, %s", uInvalidStringErr, nuInvalidStringErr) } valid := "12345678-abcd-1234-abcd-0123456789ab" uValidErr := u.Scan(valid) nuValidErr := nu.Scan(valid) if uValidErr != nuValidErr { t.Errorf("expected errors to be equal, got %s, %s", uValidErr, nuValidErr) } } func TestNullUUIDValue(t *testing.T) { var u UUID var nu NullUUID nuValue, nuErr := nu.Value() if nuErr != nil { t.Errorf("expected nil err, got err %s", nuErr) } if nuValue != nil { t.Errorf("expected nil value, got non-nil %s", nuValue) } u = MustParse("12345678-abcd-1234-abcd-0123456789ab") nu = NullUUID{ UUID: MustParse("12345678-abcd-1234-abcd-0123456789ab"), Valid: true, } uValue, uErr := u.Value() nuValue, nuErr = nu.Value() if uErr != nil { t.Errorf("expected nil err, got err %s", uErr) } if nuErr != nil { t.Errorf("expected nil err, got err %s", nuErr) } if uValue != nuValue { t.Errorf("expected uuid %s and nulluuid %s to be equal ", uValue, nuValue) } } func TestNullUUIDMarshalText(t *testing.T) { tests := []struct { nullUUID NullUUID }{ { nullUUID: NullUUID{}, }, { nullUUID: NullUUID{ UUID: MustParse("12345678-abcd-1234-abcd-0123456789ab"), Valid: true, }, }, } for _, test := range tests { var uText []byte var uErr error nuText, nuErr := test.nullUUID.MarshalText() if test.nullUUID.Valid { uText, uErr = test.nullUUID.UUID.MarshalText() } else { uText = []byte("null") } if nuErr != uErr { t.Errorf("expected error %e, got %e", nuErr, uErr) } if !bytes.Equal(nuText, uText) { t.Errorf("expected text data %s, got %s", string(nuText), string(uText)) } } } func TestNullUUIDUnmarshalText(t *testing.T) { tests := []struct { nullUUID NullUUID }{ { nullUUID: NullUUID{}, }, { nullUUID: NullUUID{ UUID: MustParse("12345678-abcd-1234-abcd-0123456789ab"), Valid: true, }, }, } for _, test := range tests { var uText []byte var uErr error nuText, nuErr := test.nullUUID.MarshalText() if test.nullUUID.Valid { uText, uErr = test.nullUUID.UUID.MarshalText() } else { uText = []byte("null") } if nuErr != uErr { t.Errorf("expected error %e, got %e", nuErr, uErr) } if !bytes.Equal(nuText, uText) { t.Errorf("expected text data %s, got %s", string(nuText), string(uText)) } } } func TestNullUUIDMarshalBinary(t *testing.T) { tests := []struct { nullUUID NullUUID }{ { 
nullUUID: NullUUID{}, }, { nullUUID: NullUUID{ UUID: MustParse("12345678-abcd-1234-abcd-0123456789ab"), Valid: true, }, }, } for _, test := range tests { var uBinary []byte var uErr error nuBinary, nuErr := test.nullUUID.MarshalBinary() if test.nullUUID.Valid { uBinary, uErr = test.nullUUID.UUID.MarshalBinary() } else { uBinary = []byte(nil) } if nuErr != uErr { t.Errorf("expected error %e, got %e", nuErr, uErr) } if !bytes.Equal(nuBinary, uBinary) { t.Errorf("expected binary data %s, got %s", string(nuBinary), string(uBinary)) } } } func TestNullUUIDMarshalJSON(t *testing.T) { jsonNull, _ := json.Marshal(nil) jsonUUID, _ := json.Marshal(MustParse("12345678-abcd-1234-abcd-0123456789ab")) tests := []struct { nullUUID NullUUID expected []byte expectedErr error }{ { nullUUID: NullUUID{}, expected: jsonNull, expectedErr: nil, }, { nullUUID: NullUUID{ UUID: MustParse(string(jsonUUID)), Valid: true, }, expected: []byte(`"12345678-abcd-1234-abcd-0123456789ab"`), expectedErr: nil, }, } for _, test := range tests { data, err := json.Marshal(&test.nullUUID) if err != test.expectedErr { t.Errorf("expected error %e, got %e", test.expectedErr, err) } if !bytes.Equal(data, test.expected) { t.Errorf("expected json data %s, got %s", string(test.expected), string(data)) } } } func TestNullUUIDUnmarshalJSON(t *testing.T) { jsonNull, _ := json.Marshal(nil) jsonUUID, _ := json.Marshal(MustParse("12345678-abcd-1234-abcd-0123456789ab")) var nu NullUUID err := json.Unmarshal(jsonNull, &nu) if err != nil || nu.Valid { t.Errorf("expected nil when unmarshaling null, got %s", err) } err = json.Unmarshal(jsonUUID, &nu) if err != nil || !nu.Valid { t.Errorf("expected nil when unmarshaling null, got %s", err) } } dependencies/pkg/mod/github.com/google/uuid@v1.6.0/CONTRIBUTING.md0000644000000000000000000000167415024302467023140 0ustar rootroot# How to contribute We definitely welcome patches and contribution to this project! ### Tips Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org). Always try to include a test case! If it is not possible or not necessary, please explain why in the pull request description. ### Releasing Commits that would precipitate a SemVer change, as described in the Conventional Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action) to create a release candidate pull request. Once submitted, `release-please` will create a release. For tips on how to work with `release-please`, see its documentation. ### Legal requirements In order to protect both you and ourselves, you will need to sign the [Contributor License Agreement](https://cla.developers.google.com/clas). You may have already signed it for other Google projects. dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/0000755000000000000000000000000015024302467022237 5ustar rootrootdependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/release-please.yml0000644000000000000000000000004615024302467025651 0ustar rootroothandleGHRelease: true releaseType: go dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/CODEOWNERS0000644000000000000000000000041115024302467023626 0ustar rootroot# Code owners file. # This file controls who is tagged for review for any given pull request. 
# For syntax help see: # https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners#codeowners-syntax * @google/go-uuid-contributors dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/workflows/0000755000000000000000000000000015024302467024274 5ustar rootrootdependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/workflows/apidiff.yaml0000644000000000000000000000111115024302467026554 0ustar rootroot--- name: apidiff on: pull_request: branches: - master permissions: contents: read jobs: compat: runs-on: ubuntu-latest steps: - uses: actions/setup-go@v4 with: go-version: 1.21 - run: go install golang.org/x/exp/cmd/apidiff@latest - uses: actions/checkout@v3 with: ref: master - run: apidiff -w uuid.baseline . - uses: actions/checkout@v3 with: clean: false - run: | apidiff -incompatible uuid.baseline . > diff.txt cat diff.txt && ! [ -s diff.txt ] dependencies/pkg/mod/github.com/google/uuid@v1.6.0/.github/workflows/tests.yaml0000644000000000000000000000057515024302467026331 0ustar rootroot--- name: tests on: pull_request: branches: - master permissions: contents: read jobs: unit-tests: strategy: matrix: go-version: [1.19, 1.20.x, 1.21] runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: go-version: ${{ matrix.go-version }} - run: go test -v ./... dependencies/pkg/mod/github.com/google/uuid@v1.6.0/uuid.go0000644000000000000000000002264115024302467022201 0ustar rootroot// Copyright 2018 Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package uuid import ( "bytes" "crypto/rand" "encoding/hex" "errors" "fmt" "io" "strings" "sync" ) // A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC // 4122. type UUID [16]byte // A Version represents a UUID's version. type Version byte // A Variant represents a UUID's variant. type Variant byte // Constants returned by Variant. const ( Invalid = Variant(iota) // Invalid UUID RFC4122 // The variant specified in RFC4122 Reserved // Reserved, NCS backward compatibility. Microsoft // Reserved, Microsoft Corporation backward compatibility. Future // Reserved for future definition. ) const randPoolSize = 16 * 16 var ( rander = rand.Reader // random function poolEnabled = false poolMu sync.Mutex poolPos = randPoolSize // protected with poolMu pool [randPoolSize]byte // protected with poolMu ) type invalidLengthError struct{ len int } func (err invalidLengthError) Error() string { return fmt.Sprintf("invalid UUID length: %d", err.len) } // IsInvalidLengthError is matcher function for custom error invalidLengthError func IsInvalidLengthError(err error) bool { _, ok := err.(invalidLengthError) return ok } // Parse decodes s into a UUID or returns an error if it cannot be parsed. Both // the standard UUID forms defined in RFC 4122 // (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition, // Parse accepts non-standard strings such as the raw hex encoding // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings, // e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are // examined in the latter case. Parse should not be used to validate strings as // it parses non-standard encodings as indicated above. 
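//
// An illustrative usage sketch (all of the accepted encodings decode to the
// same value):
//
//	id, err := Parse("urn:uuid:f47ac10b-58cc-0372-8567-0e02b2c3d479")
//	if err != nil {
//		// handle the malformed input
//	}
//	_ = id.String() // "f47ac10b-58cc-0372-8567-0e02b2c3d479"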
func Parse(s string) (UUID, error) { var uuid UUID switch len(s) { // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx case 36: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx case 36 + 9: if !strings.EqualFold(s[:9], "urn:uuid:") { return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) } s = s[9:] // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} case 36 + 2: s = s[1:] // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx case 32: var ok bool for i := range uuid { uuid[i], ok = xtob(s[i*2], s[i*2+1]) if !ok { return uuid, errors.New("invalid UUID format") } } return uuid, nil default: return uuid, invalidLengthError{len(s)} } // s is now at least 36 bytes long // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { return uuid, errors.New("invalid UUID format") } for i, x := range [16]int{ 0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34, } { v, ok := xtob(s[x], s[x+1]) if !ok { return uuid, errors.New("invalid UUID format") } uuid[i] = v } return uuid, nil } // ParseBytes is like Parse, except it parses a byte slice instead of a string. func ParseBytes(b []byte) (UUID, error) { var uuid UUID switch len(b) { case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) { return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) } b = b[9:] case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} b = b[1:] case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx var ok bool for i := 0; i < 32; i += 2 { uuid[i/2], ok = xtob(b[i], b[i+1]) if !ok { return uuid, errors.New("invalid UUID format") } } return uuid, nil default: return uuid, invalidLengthError{len(b)} } // s is now at least 36 bytes long // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { return uuid, errors.New("invalid UUID format") } for i, x := range [16]int{ 0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34, } { v, ok := xtob(b[x], b[x+1]) if !ok { return uuid, errors.New("invalid UUID format") } uuid[i] = v } return uuid, nil } // MustParse is like Parse but panics if the string cannot be parsed. // It simplifies safe initialization of global variables holding compiled UUIDs. func MustParse(s string) UUID { uuid, err := Parse(s) if err != nil { panic(`uuid: Parse(` + s + `): ` + err.Error()) } return uuid } // FromBytes creates a new UUID from a byte slice. Returns an error if the slice // does not have a length of 16. The bytes are copied from the slice. func FromBytes(b []byte) (uuid UUID, err error) { err = uuid.UnmarshalBinary(b) return uuid, err } // Must returns uuid if err is nil and panics otherwise. func Must(uuid UUID, err error) UUID { if err != nil { panic(err) } return uuid } // Validate returns an error if s is not a properly formatted UUID in one of the following formats: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} // It returns an error if the format is invalid, otherwise nil. 
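//
// An illustrative usage sketch:
//
//	if err := Validate("f47ac10b-58cc-0372-8567-0e02b2c3d479"); err != nil {
//		// the input is not in any of the accepted encodings
//	}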
func Validate(s string) error { switch len(s) { // Standard UUID format case 36: // UUID with "urn:uuid:" prefix case 36 + 9: if !strings.EqualFold(s[:9], "urn:uuid:") { return fmt.Errorf("invalid urn prefix: %q", s[:9]) } s = s[9:] // UUID enclosed in braces case 36 + 2: if s[0] != '{' || s[len(s)-1] != '}' { return fmt.Errorf("invalid bracketed UUID format") } s = s[1 : len(s)-1] // UUID without hyphens case 32: for i := 0; i < len(s); i += 2 { _, ok := xtob(s[i], s[i+1]) if !ok { return errors.New("invalid UUID format") } } default: return invalidLengthError{len(s)} } // Check for standard UUID format if len(s) == 36 { if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { return errors.New("invalid UUID format") } for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} { if _, ok := xtob(s[x], s[x+1]); !ok { return errors.New("invalid UUID format") } } } return nil } // String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx // , or "" if uuid is invalid. func (uuid UUID) String() string { var buf [36]byte encodeHex(buf[:], uuid) return string(buf[:]) } // URN returns the RFC 2141 URN form of uuid, // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. func (uuid UUID) URN() string { var buf [36 + 9]byte copy(buf[:], "urn:uuid:") encodeHex(buf[9:], uuid) return string(buf[:]) } func encodeHex(dst []byte, uuid UUID) { hex.Encode(dst, uuid[:4]) dst[8] = '-' hex.Encode(dst[9:13], uuid[4:6]) dst[13] = '-' hex.Encode(dst[14:18], uuid[6:8]) dst[18] = '-' hex.Encode(dst[19:23], uuid[8:10]) dst[23] = '-' hex.Encode(dst[24:], uuid[10:]) } // Variant returns the variant encoded in uuid. func (uuid UUID) Variant() Variant { switch { case (uuid[8] & 0xc0) == 0x80: return RFC4122 case (uuid[8] & 0xe0) == 0xc0: return Microsoft case (uuid[8] & 0xe0) == 0xe0: return Future default: return Reserved } } // Version returns the version of uuid. func (uuid UUID) Version() Version { return Version(uuid[6] >> 4) } func (v Version) String() string { if v > 15 { return fmt.Sprintf("BAD_VERSION_%d", v) } return fmt.Sprintf("VERSION_%d", v) } func (v Variant) String() string { switch v { case RFC4122: return "RFC4122" case Reserved: return "Reserved" case Microsoft: return "Microsoft" case Future: return "Future" case Invalid: return "Invalid" } return fmt.Sprintf("BadVariant%d", int(v)) } // SetRand sets the random number generator to r, which implements io.Reader. // If r.Read returns an error when the package requests random data then // a panic will be issued. // // Calling SetRand with nil sets the random number generator to the default // generator. func SetRand(r io.Reader) { if r == nil { rander = rand.Reader return } rander = r } // EnableRandPool enables internal randomness pool used for Random // (Version 4) UUID generation. The pool contains random bytes read from // the random number generator on demand in batches. Enabling the pool // may improve the UUID generation throughput significantly. // // Since the pool is stored on the Go heap, this feature may be a bad fit // for security sensitive applications. // // Both EnableRandPool and DisableRandPool are not thread-safe and should // only be called when there is no possibility that New or any other // UUID Version 4 generation function will be called concurrently. func EnableRandPool() { poolEnabled = true } // DisableRandPool disables the randomness pool if it was previously // enabled with EnableRandPool. 
// // Both EnableRandPool and DisableRandPool are not thread-safe and should // only be called when there is no possibility that New or any other // UUID Version 4 generation function will be called concurrently. func DisableRandPool() { poolEnabled = false defer poolMu.Unlock() poolMu.Lock() poolPos = randPoolSize } // UUIDs is a slice of UUID types. type UUIDs []UUID // Strings returns a string slice containing the string form of each UUID in uuids. func (uuids UUIDs) Strings() []string { var uuidStrs = make([]string, len(uuids)) for i, uuid := range uuids { uuidStrs[i] = uuid.String() } return uuidStrs } dependencies/pkg/mod/github.com/google/uuid@v1.6.0/hash.go0000644000000000000000000000365315024302467022160 0ustar rootroot// Copyright 2016 Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package uuid import ( "crypto/md5" "crypto/sha1" "hash" ) // Well known namespace IDs and UUIDs var ( NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) Nil UUID // empty UUID, all zeros // The Max UUID is special form of UUID that is specified to have all 128 bits set to 1. Max = UUID{ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, } ) // NewHash returns a new UUID derived from the hash of space concatenated with // data generated by h. The hash should be at least 16 byte in length. The // first 16 bytes of the hash are used to form the UUID. The version of the // UUID will be the lower 4 bits of version. NewHash is used to implement // NewMD5 and NewSHA1. func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { h.Reset() h.Write(space[:]) //nolint:errcheck h.Write(data) //nolint:errcheck s := h.Sum(nil) var uuid UUID copy(uuid[:], s) uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant return uuid } // NewMD5 returns a new MD5 (Version 3) UUID based on the // supplied name space and data. It is the same as calling: // // NewHash(md5.New(), space, data, 3) func NewMD5(space UUID, data []byte) UUID { return NewHash(md5.New(), space, data, 3) } // NewSHA1 returns a new SHA1 (Version 5) UUID based on the // supplied name space and data. It is the same as calling: // // NewHash(sha1.New(), space, data, 5) func NewSHA1(space UUID, data []byte) UUID { return NewHash(sha1.New(), space, data, 5) } dependencies/pkg/mod/github.com/google/uuid@v1.6.0/node_net.go0000644000000000000000000000166515024302467023031 0ustar rootroot// Copyright 2017 Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build !js package uuid import "net" var interfaces []net.Interface // cached list of interfaces // getHardwareInterface returns the name and hardware address of interface name. // If name is "" then the name and hardware address of one of the system's // interfaces is returned. If no interfaces are found (name does not exist or // there are no interfaces) then "", nil is returned. // // Only addresses of at least 6 bytes are returned. 
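//
// Illustrative sketch of the expected call pattern (assuming a host with at
// least one interface exposing a 6-byte hardware address):
//
//	iname, addr := getHardwareInterface("")
//	if addr == nil {
//		// no usable interface; callers fall back to a random node ID
//	}
//	_ = iname // e.g. "eth0" on a typical Linux host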
func getHardwareInterface(name string) (string, []byte) { if interfaces == nil { var err error interfaces, err = net.Interfaces() if err != nil { return "", nil } } for _, ifs := range interfaces { if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { return ifs.Name, ifs.HardwareAddr } } return "", nil } dependencies/pkg/mod/github.com/google/uuid@v1.6.0/CHANGELOG.md0000644000000000000000000000316015024302467022510 0ustar rootroot# Changelog ## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16) ### Features * add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3)) ### Bug Fixes * fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06)) * Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6)) ## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12) ### Features * Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29)) ## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26) ### Features * UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4)) ### Fixes * Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior) ## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18) ### Bug Fixes * Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0)) ## Changelog dependencies/pkg/mod/github.com/google/uuid@v1.6.0/version4.go0000644000000000000000000000401115024302467022773 0ustar rootroot// Copyright 2016 Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package uuid import "io" // New creates a new random UUID or panics. New is equivalent to // the expression // // uuid.Must(uuid.NewRandom()) func New() UUID { return Must(NewRandom()) } // NewString creates a new random UUID and returns it as a string or panics. // NewString is equivalent to the expression // // uuid.New().String() func NewString() string { return Must(NewRandom()).String() } // NewRandom returns a Random (Version 4) UUID. // // The strength of the UUIDs is based on the strength of the crypto/rand // package. // // Uses the randomness pool if it was enabled with EnableRandPool. // // A note about uniqueness derived from the UUID Wikipedia entry: // // Randomly generated UUIDs have 122 random bits. One's annual risk of being // hit by a meteorite is estimated to be one chance in 17 billion, that // means the probability is about 0.00000000006 (6 × 10−11), // equivalent to the odds of creating a few tens of trillions of UUIDs in a // year and having one duplicate. func NewRandom() (UUID, error) { if !poolEnabled { return NewRandomFromReader(rander) } return newRandomFromPool() } // NewRandomFromReader returns a UUID based on bytes read from a given io.Reader. 
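//
// A minimal sketch (illustrative) using a deterministic reader; any io.Reader
// that can supply 16 bytes works:
//
//	u, err := NewRandomFromReader(bytes.NewReader(make([]byte, 16)))
//	if err != nil {
//		// the reader could not supply 16 bytes
//	}
//	_ = u // version and variant bits are set; all other bits come from the reader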
func NewRandomFromReader(r io.Reader) (UUID, error) { var uuid UUID _, err := io.ReadFull(r, uuid[:]) if err != nil { return Nil, err } uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 return uuid, nil } func newRandomFromPool() (UUID, error) { var uuid UUID poolMu.Lock() if poolPos == randPoolSize { _, err := io.ReadFull(rander, pool[:]) if err != nil { poolMu.Unlock() return Nil, err } poolPos = 0 } copy(uuid[:], pool[poolPos:(poolPos+16)]) poolPos += 16 poolMu.Unlock() uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 return uuid, nil } dependencies/pkg/mod/github.com/pkg/0000775000000000000000000000000015024302466016366 5ustar rootrootdependencies/pkg/mod/github.com/pkg/errors@v0.9.1/0000755000000000000000000000000015024302466020554 5ustar rootrootdependencies/pkg/mod/github.com/pkg/errors@v0.9.1/json_test.go0000644000000000000000000000175615024302466023124 0ustar rootrootpackage errors import ( "encoding/json" "regexp" "testing" ) func TestFrameMarshalText(t *testing.T) { var tests = []struct { Frame want string }{{ initpc, `^github.com/pkg/errors\.init(\.ializers)? .+/github\.com/pkg/errors/stack_test.go:\d+$`, }, { 0, `^unknown$`, }} for i, tt := range tests { got, err := tt.Frame.MarshalText() if err != nil { t.Fatal(err) } if !regexp.MustCompile(tt.want).Match(got) { t.Errorf("test %d: MarshalJSON:\n got %q\n want %q", i+1, string(got), tt.want) } } } func TestFrameMarshalJSON(t *testing.T) { var tests = []struct { Frame want string }{{ initpc, `^"github\.com/pkg/errors\.init(\.ializers)? .+/github\.com/pkg/errors/stack_test.go:\d+"$`, }, { 0, `^"unknown"$`, }} for i, tt := range tests { got, err := json.Marshal(tt.Frame) if err != nil { t.Fatal(err) } if !regexp.MustCompile(tt.want).Match(got) { t.Errorf("test %d: MarshalJSON:\n got %q\n want %q", i+1, string(got), tt.want) } } } dependencies/pkg/mod/github.com/pkg/errors@v0.9.1/LICENSE0000644000000000000000000000244015024302466021561 0ustar rootrootCopyright (c) 2015, Dave Cheney All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
dependencies/pkg/mod/github.com/pkg/errors@v0.9.1/stack.go0000644000000000000000000001017515024302466022214 0ustar rootrootpackage errors import ( "fmt" "io" "path" "runtime" "strconv" "strings" ) // Frame represents a program counter inside a stack frame. // For historical reasons if Frame is interpreted as a uintptr // its value represents the program counter + 1. type Frame uintptr // pc returns the program counter for this frame; // multiple frames may have the same PC value. func (f Frame) pc() uintptr { return uintptr(f) - 1 } // file returns the full path to the file that contains the // function for this Frame's pc. func (f Frame) file() string { fn := runtime.FuncForPC(f.pc()) if fn == nil { return "unknown" } file, _ := fn.FileLine(f.pc()) return file } // line returns the line number of source code of the // function for this Frame's pc. func (f Frame) line() int { fn := runtime.FuncForPC(f.pc()) if fn == nil { return 0 } _, line := fn.FileLine(f.pc()) return line } // name returns the name of this function, if known. func (f Frame) name() string { fn := runtime.FuncForPC(f.pc()) if fn == nil { return "unknown" } return fn.Name() } // Format formats the frame according to the fmt.Formatter interface. // // %s source file // %d source line // %n function name // %v equivalent to %s:%d // // Format accepts flags that alter the printing of some verbs, as follows: // // %+s function name and path of source file relative to the compile time // GOPATH separated by \n\t (\n\t) // %+v equivalent to %+s:%d func (f Frame) Format(s fmt.State, verb rune) { switch verb { case 's': switch { case s.Flag('+'): io.WriteString(s, f.name()) io.WriteString(s, "\n\t") io.WriteString(s, f.file()) default: io.WriteString(s, path.Base(f.file())) } case 'd': io.WriteString(s, strconv.Itoa(f.line())) case 'n': io.WriteString(s, funcname(f.name())) case 'v': f.Format(s, 's') io.WriteString(s, ":") f.Format(s, 'd') } } // MarshalText formats a stacktrace Frame as a text string. The output is the // same as that of fmt.Sprintf("%+v", f), but without newlines or tabs. func (f Frame) MarshalText() ([]byte, error) { name := f.name() if name == "unknown" { return []byte(name), nil } return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil } // StackTrace is stack of Frames from innermost (newest) to outermost (oldest). type StackTrace []Frame // Format formats the stack of Frames according to the fmt.Formatter interface. // // %s lists source files for each Frame in the stack // %v lists the source file and line number for each Frame in the stack // // Format accepts flags that alter the printing of some verbs, as follows: // // %+v Prints filename, function, and line number for each Frame in the stack. func (st StackTrace) Format(s fmt.State, verb rune) { switch verb { case 'v': switch { case s.Flag('+'): for _, f := range st { io.WriteString(s, "\n") f.Format(s, verb) } case s.Flag('#'): fmt.Fprintf(s, "%#v", []Frame(st)) default: st.formatSlice(s, verb) } case 's': st.formatSlice(s, verb) } } // formatSlice will format this StackTrace into the given buffer as a slice of // Frame, only valid when called with '%s' or '%v'. func (st StackTrace) formatSlice(s fmt.State, verb rune) { io.WriteString(s, "[") for i, f := range st { if i > 0 { io.WriteString(s, " ") } f.Format(s, verb) } io.WriteString(s, "]") } // stack represents a stack of program counters. 
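//
// Illustrative sketch (not taken from the upstream source) of how the Frame
// and StackTrace verbs documented above surface to callers of this package:
//
//	err := New("boom")       // records a stack at the call site
//	fmt.Printf("%v\n", err)  // "boom"
//	fmt.Printf("%+v\n", err) // "boom" followed by function, file and line for each frame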
type stack []uintptr func (s *stack) Format(st fmt.State, verb rune) { switch verb { case 'v': switch { case st.Flag('+'): for _, pc := range *s { f := Frame(pc) fmt.Fprintf(st, "\n%+v", f) } } } } func (s *stack) StackTrace() StackTrace { f := make([]Frame, len(*s)) for i := 0; i < len(f); i++ { f[i] = Frame((*s)[i]) } return f } func callers() *stack { const depth = 32 var pcs [depth]uintptr n := runtime.Callers(3, pcs[:]) var st stack = pcs[0:n] return &st } // funcname removes the path prefix component of a function's name reported by func.Name(). func funcname(name string) string { i := strings.LastIndex(name, "/") name = name[i+1:] i = strings.Index(name, ".") return name[i+1:] } dependencies/pkg/mod/github.com/pkg/errors@v0.9.1/go113_test.go0000644000000000000000000000611415024302466022776 0ustar rootroot// +build go1.13 package errors import ( stderrors "errors" "fmt" "reflect" "testing" ) func TestErrorChainCompat(t *testing.T) { err := stderrors.New("error that gets wrapped") wrapped := Wrap(err, "wrapped up") if !stderrors.Is(wrapped, err) { t.Errorf("Wrap does not support Go 1.13 error chains") } } func TestIs(t *testing.T) { err := New("test") type args struct { err error target error } tests := []struct { name string args args want bool }{ { name: "with stack", args: args{ err: WithStack(err), target: err, }, want: true, }, { name: "with message", args: args{ err: WithMessage(err, "test"), target: err, }, want: true, }, { name: "with message format", args: args{ err: WithMessagef(err, "%s", "test"), target: err, }, want: true, }, { name: "std errors compatibility", args: args{ err: fmt.Errorf("wrap it: %w", err), target: err, }, want: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := Is(tt.args.err, tt.args.target); got != tt.want { t.Errorf("Is() = %v, want %v", got, tt.want) } }) } } type customErr struct { msg string } func (c customErr) Error() string { return c.msg } func TestAs(t *testing.T) { var err = customErr{msg: "test message"} type args struct { err error target interface{} } tests := []struct { name string args args want bool }{ { name: "with stack", args: args{ err: WithStack(err), target: new(customErr), }, want: true, }, { name: "with message", args: args{ err: WithMessage(err, "test"), target: new(customErr), }, want: true, }, { name: "with message format", args: args{ err: WithMessagef(err, "%s", "test"), target: new(customErr), }, want: true, }, { name: "std errors compatibility", args: args{ err: fmt.Errorf("wrap it: %w", err), target: new(customErr), }, want: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := As(tt.args.err, tt.args.target); got != tt.want { t.Errorf("As() = %v, want %v", got, tt.want) } ce := tt.args.target.(*customErr) if !reflect.DeepEqual(err, *ce) { t.Errorf("set target error failed, target error is %v", *ce) } }) } } func TestUnwrap(t *testing.T) { err := New("test") type args struct { err error } tests := []struct { name string args args want error }{ { name: "with stack", args: args{err: WithStack(err)}, want: err, }, { name: "with message", args: args{err: WithMessage(err, "test")}, want: err, }, { name: "with message format", args: args{err: WithMessagef(err, "%s", "test")}, want: err, }, { name: "std errors compatibility", args: args{err: fmt.Errorf("wrap: %w", err)}, want: err, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if err := Unwrap(tt.args.err); !reflect.DeepEqual(err, tt.want) { t.Errorf("Unwrap() error = %v, want %v", err, 
tt.want) } }) } } dependencies/pkg/mod/github.com/pkg/errors@v0.9.1/README.md0000644000000000000000000000523515024302466022040 0ustar rootroot# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge) Package errors provides simple error handling primitives. `go get github.com/pkg/errors` The traditional error handling idiom in Go is roughly akin to ```go if err != nil { return err } ``` which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. ## Adding context to an error The errors.Wrap function returns a new error that adds context to the original error. For example ```go _, err := ioutil.ReadAll(r) if err != nil { return errors.Wrap(err, "read failed") } ``` ## Retrieving the cause of an error Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. ```go type causer interface { Cause() error } ``` `errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: ```go switch err := errors.Cause(err).(type) { case *MyError: // handle specifically default: // unknown error } ``` [Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). ## Roadmap With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows: - 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible) - 1.0. Final release. ## Contributing Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports. Before sending a PR, please discuss your change by raising an issue. ## License BSD-2-Clause dependencies/pkg/mod/github.com/pkg/errors@v0.9.1/bench_test.go0000644000000000000000000000367715024302466023236 0ustar rootroot// +build go1.7 package errors import ( "fmt" "testing" stderrors "errors" ) func noErrors(at, depth int) error { if at >= depth { return stderrors.New("no error") } return noErrors(at+1, depth) } func yesErrors(at, depth int) error { if at >= depth { return New("ye error") } return yesErrors(at+1, depth) } // GlobalE is an exported global to store the result of benchmark results, // preventing the compiler from optimising the benchmark functions away. 
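//
// A minimal sketch of the sink-variable pattern (illustrative; doWork is a
// hypothetical stand-in for the code under measurement):
//
//	func BenchmarkThing(b *testing.B) {
//		var r error
//		for i := 0; i < b.N; i++ {
//			r = doWork()
//		}
//		GlobalE = r // publishing the result keeps the loop from being elided
//	}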
var GlobalE interface{} func BenchmarkErrors(b *testing.B) { type run struct { stack int std bool } runs := []run{ {10, false}, {10, true}, {100, false}, {100, true}, {1000, false}, {1000, true}, } for _, r := range runs { part := "pkg/errors" if r.std { part = "errors" } name := fmt.Sprintf("%s-stack-%d", part, r.stack) b.Run(name, func(b *testing.B) { var err error f := yesErrors if r.std { f = noErrors } b.ReportAllocs() for i := 0; i < b.N; i++ { err = f(0, r.stack) } b.StopTimer() GlobalE = err }) } } func BenchmarkStackFormatting(b *testing.B) { type run struct { stack int format string } runs := []run{ {10, "%s"}, {10, "%v"}, {10, "%+v"}, {30, "%s"}, {30, "%v"}, {30, "%+v"}, {60, "%s"}, {60, "%v"}, {60, "%+v"}, } var stackStr string for _, r := range runs { name := fmt.Sprintf("%s-stack-%d", r.format, r.stack) b.Run(name, func(b *testing.B) { err := yesErrors(0, r.stack) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { stackStr = fmt.Sprintf(r.format, err) } b.StopTimer() }) } for _, r := range runs { name := fmt.Sprintf("%s-stacktrace-%d", r.format, r.stack) b.Run(name, func(b *testing.B) { err := yesErrors(0, r.stack) st := err.(*fundamental).stack.StackTrace() b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { stackStr = fmt.Sprintf(r.format, st) } b.StopTimer() }) } GlobalE = stackStr } dependencies/pkg/mod/github.com/pkg/errors@v0.9.1/appveyor.yml0000644000000000000000000000117715024302466023152 0ustar rootrootversion: build-{build}.{branch} clone_folder: C:\gopath\src\github.com\pkg\errors shallow_clone: true # for startup speed environment: GOPATH: C:\gopath platform: - x64 # http://www.appveyor.com/docs/installed-software install: # some helpful output for debugging builds - go version - go env # pre-installed MinGW at C:\MinGW is 32bit only # but MSYS2 at C:\msys64 has mingw64 - set PATH=C:\msys64\mingw64\bin;%PATH% - gcc --version - g++ --version build_script: - go install -v ./... test_script: - set PATH=C:\gopath\bin;%PATH% - go test -v ./... #artifacts: # - path: '%GOPATH%\bin\*.exe' deploy: off dependencies/pkg/mod/github.com/pkg/errors@v0.9.1/.travis.yml0000644000000000000000000000017015024302466022663 0ustar rootrootlanguage: go go_import_path: github.com/pkg/errors go: - 1.11.x - 1.12.x - 1.13.x - tip script: - make check dependencies/pkg/mod/github.com/pkg/errors@v0.9.1/Makefile0000644000000000000000000000154715024302466022223 0ustar rootrootPKGS := github.com/pkg/errors SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS)) GO := go check: test vet gofmt misspell unconvert staticcheck ineffassign unparam test: $(GO) test $(PKGS) vet: | test $(GO) vet $(PKGS) staticcheck: $(GO) get honnef.co/go/tools/cmd/staticcheck staticcheck -checks all $(PKGS) misspell: $(GO) get github.com/client9/misspell/cmd/misspell misspell \ -locale GB \ -error \ *.md *.go unconvert: $(GO) get github.com/mdempsky/unconvert unconvert -v $(PKGS) ineffassign: $(GO) get github.com/gordonklaus/ineffassign find $(SRCDIRS) -name '*.go' | xargs ineffassign pedantic: check errcheck unparam: $(GO) get mvdan.cc/unparam unparam ./... errcheck: $(GO) get github.com/kisielk/errcheck errcheck $(PKGS) gofmt: @echo Checking code is gofmted @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)" dependencies/pkg/mod/github.com/pkg/errors@v0.9.1/go113.go0000644000000000000000000000265315024302466021743 0ustar rootroot// +build go1.13 package errors import ( stderrors "errors" ) // Is reports whether any error in err's chain matches target. 
// // The chain consists of err itself followed by the sequence of errors obtained by // repeatedly calling Unwrap. // // An error is considered to match a target if it is equal to that target or if // it implements a method Is(error) bool such that Is(target) returns true. func Is(err, target error) bool { return stderrors.Is(err, target) } // As finds the first error in err's chain that matches target, and if so, sets // target to that error value and returns true. // // The chain consists of err itself followed by the sequence of errors obtained by // repeatedly calling Unwrap. // // An error matches target if the error's concrete value is assignable to the value // pointed to by target, or if the error has a method As(interface{}) bool such that // As(target) returns true. In the latter case, the As method is responsible for // setting target. // // As will panic if target is not a non-nil pointer to either a type that implements // error, or to any interface type. As returns false if err is nil. func As(err error, target interface{}) bool { return stderrors.As(err, target) } // Unwrap returns the result of calling the Unwrap method on err, if err's // type contains an Unwrap method returning error. // Otherwise, Unwrap returns nil. func Unwrap(err error) error { return stderrors.Unwrap(err) } dependencies/pkg/mod/github.com/pkg/errors@v0.9.1/example_test.go0000644000000000000000000001244715024302466023605 0ustar rootrootpackage errors_test import ( "fmt" "github.com/pkg/errors" ) func ExampleNew() { err := errors.New("whoops") fmt.Println(err) // Output: whoops } func ExampleNew_printf() { err := errors.New("whoops") fmt.Printf("%+v", err) // Example output: // whoops // github.com/pkg/errors_test.ExampleNew_printf // /home/dfc/src/github.com/pkg/errors/example_test.go:17 // testing.runExample // /home/dfc/go/src/testing/example.go:114 // testing.RunExamples // /home/dfc/go/src/testing/example.go:38 // testing.(*M).Run // /home/dfc/go/src/testing/testing.go:744 // main.main // /github.com/pkg/errors/_test/_testmain.go:106 // runtime.main // /home/dfc/go/src/runtime/proc.go:183 // runtime.goexit // /home/dfc/go/src/runtime/asm_amd64.s:2059 } func ExampleWithMessage() { cause := errors.New("whoops") err := errors.WithMessage(cause, "oh noes") fmt.Println(err) // Output: oh noes: whoops } func ExampleWithStack() { cause := errors.New("whoops") err := errors.WithStack(cause) fmt.Println(err) // Output: whoops } func ExampleWithStack_printf() { cause := errors.New("whoops") err := errors.WithStack(cause) fmt.Printf("%+v", err) // Example Output: // whoops // github.com/pkg/errors_test.ExampleWithStack_printf // /home/fabstu/go/src/github.com/pkg/errors/example_test.go:55 // testing.runExample // /usr/lib/go/src/testing/example.go:114 // testing.RunExamples // /usr/lib/go/src/testing/example.go:38 // testing.(*M).Run // /usr/lib/go/src/testing/testing.go:744 // main.main // github.com/pkg/errors/_test/_testmain.go:106 // runtime.main // /usr/lib/go/src/runtime/proc.go:183 // runtime.goexit // /usr/lib/go/src/runtime/asm_amd64.s:2086 // github.com/pkg/errors_test.ExampleWithStack_printf // /home/fabstu/go/src/github.com/pkg/errors/example_test.go:56 // testing.runExample // /usr/lib/go/src/testing/example.go:114 // testing.RunExamples // /usr/lib/go/src/testing/example.go:38 // testing.(*M).Run // /usr/lib/go/src/testing/testing.go:744 // main.main // github.com/pkg/errors/_test/_testmain.go:106 // runtime.main // /usr/lib/go/src/runtime/proc.go:183 // runtime.goexit // 
/usr/lib/go/src/runtime/asm_amd64.s:2086 } func ExampleWrap() { cause := errors.New("whoops") err := errors.Wrap(cause, "oh noes") fmt.Println(err) // Output: oh noes: whoops } func fn() error { e1 := errors.New("error") e2 := errors.Wrap(e1, "inner") e3 := errors.Wrap(e2, "middle") return errors.Wrap(e3, "outer") } func ExampleCause() { err := fn() fmt.Println(err) fmt.Println(errors.Cause(err)) // Output: outer: middle: inner: error // error } func ExampleWrap_extended() { err := fn() fmt.Printf("%+v\n", err) // Example output: // error // github.com/pkg/errors_test.fn // /home/dfc/src/github.com/pkg/errors/example_test.go:47 // github.com/pkg/errors_test.ExampleCause_printf // /home/dfc/src/github.com/pkg/errors/example_test.go:63 // testing.runExample // /home/dfc/go/src/testing/example.go:114 // testing.RunExamples // /home/dfc/go/src/testing/example.go:38 // testing.(*M).Run // /home/dfc/go/src/testing/testing.go:744 // main.main // /github.com/pkg/errors/_test/_testmain.go:104 // runtime.main // /home/dfc/go/src/runtime/proc.go:183 // runtime.goexit // /home/dfc/go/src/runtime/asm_amd64.s:2059 // github.com/pkg/errors_test.fn // /home/dfc/src/github.com/pkg/errors/example_test.go:48: inner // github.com/pkg/errors_test.fn // /home/dfc/src/github.com/pkg/errors/example_test.go:49: middle // github.com/pkg/errors_test.fn // /home/dfc/src/github.com/pkg/errors/example_test.go:50: outer } func ExampleWrapf() { cause := errors.New("whoops") err := errors.Wrapf(cause, "oh noes #%d", 2) fmt.Println(err) // Output: oh noes #2: whoops } func ExampleErrorf_extended() { err := errors.Errorf("whoops: %s", "foo") fmt.Printf("%+v", err) // Example output: // whoops: foo // github.com/pkg/errors_test.ExampleErrorf // /home/dfc/src/github.com/pkg/errors/example_test.go:101 // testing.runExample // /home/dfc/go/src/testing/example.go:114 // testing.RunExamples // /home/dfc/go/src/testing/example.go:38 // testing.(*M).Run // /home/dfc/go/src/testing/testing.go:744 // main.main // /github.com/pkg/errors/_test/_testmain.go:102 // runtime.main // /home/dfc/go/src/runtime/proc.go:183 // runtime.goexit // /home/dfc/go/src/runtime/asm_amd64.s:2059 } func Example_stackTrace() { type stackTracer interface { StackTrace() errors.StackTrace } err, ok := errors.Cause(fn()).(stackTracer) if !ok { panic("oops, err does not implement stackTracer") } st := err.StackTrace() fmt.Printf("%+v", st[0:2]) // top two frames // Example output: // github.com/pkg/errors_test.fn // /home/dfc/src/github.com/pkg/errors/example_test.go:47 // github.com/pkg/errors_test.Example_stackTrace // /home/dfc/src/github.com/pkg/errors/example_test.go:127 } func ExampleCause_printf() { err := errors.Wrap(func() error { return func() error { return errors.New("hello world") }() }(), "failed") fmt.Printf("%v", err) // Output: failed: hello world } dependencies/pkg/mod/github.com/pkg/errors@v0.9.1/errors_test.go0000644000000000000000000001304515024302466023461 0ustar rootrootpackage errors import ( "errors" "fmt" "io" "reflect" "testing" ) func TestNew(t *testing.T) { tests := []struct { err string want error }{ {"", fmt.Errorf("")}, {"foo", fmt.Errorf("foo")}, {"foo", New("foo")}, {"string with format specifiers: %v", errors.New("string with format specifiers: %v")}, } for _, tt := range tests { got := New(tt.err) if got.Error() != tt.want.Error() { t.Errorf("New.Error(): got: %q, want %q", got, tt.want) } } } func TestWrapNil(t *testing.T) { got := Wrap(nil, "no error") if got != nil { t.Errorf("Wrap(nil, \"no error\"): got %#v, expected 
nil", got) } } func TestWrap(t *testing.T) { tests := []struct { err error message string want string }{ {io.EOF, "read error", "read error: EOF"}, {Wrap(io.EOF, "read error"), "client error", "client error: read error: EOF"}, } for _, tt := range tests { got := Wrap(tt.err, tt.message).Error() if got != tt.want { t.Errorf("Wrap(%v, %q): got: %v, want %v", tt.err, tt.message, got, tt.want) } } } type nilError struct{} func (nilError) Error() string { return "nil error" } func TestCause(t *testing.T) { x := New("error") tests := []struct { err error want error }{{ // nil error is nil err: nil, want: nil, }, { // explicit nil error is nil err: (error)(nil), want: nil, }, { // typed nil is nil err: (*nilError)(nil), want: (*nilError)(nil), }, { // uncaused error is unaffected err: io.EOF, want: io.EOF, }, { // caused error returns cause err: Wrap(io.EOF, "ignored"), want: io.EOF, }, { err: x, // return from errors.New want: x, }, { WithMessage(nil, "whoops"), nil, }, { WithMessage(io.EOF, "whoops"), io.EOF, }, { WithStack(nil), nil, }, { WithStack(io.EOF), io.EOF, }} for i, tt := range tests { got := Cause(tt.err) if !reflect.DeepEqual(got, tt.want) { t.Errorf("test %d: got %#v, want %#v", i+1, got, tt.want) } } } func TestWrapfNil(t *testing.T) { got := Wrapf(nil, "no error") if got != nil { t.Errorf("Wrapf(nil, \"no error\"): got %#v, expected nil", got) } } func TestWrapf(t *testing.T) { tests := []struct { err error message string want string }{ {io.EOF, "read error", "read error: EOF"}, {Wrapf(io.EOF, "read error without format specifiers"), "client error", "client error: read error without format specifiers: EOF"}, {Wrapf(io.EOF, "read error with %d format specifier", 1), "client error", "client error: read error with 1 format specifier: EOF"}, } for _, tt := range tests { got := Wrapf(tt.err, tt.message).Error() if got != tt.want { t.Errorf("Wrapf(%v, %q): got: %v, want %v", tt.err, tt.message, got, tt.want) } } } func TestErrorf(t *testing.T) { tests := []struct { err error want string }{ {Errorf("read error without format specifiers"), "read error without format specifiers"}, {Errorf("read error with %d format specifier", 1), "read error with 1 format specifier"}, } for _, tt := range tests { got := tt.err.Error() if got != tt.want { t.Errorf("Errorf(%v): got: %q, want %q", tt.err, got, tt.want) } } } func TestWithStackNil(t *testing.T) { got := WithStack(nil) if got != nil { t.Errorf("WithStack(nil): got %#v, expected nil", got) } } func TestWithStack(t *testing.T) { tests := []struct { err error want string }{ {io.EOF, "EOF"}, {WithStack(io.EOF), "EOF"}, } for _, tt := range tests { got := WithStack(tt.err).Error() if got != tt.want { t.Errorf("WithStack(%v): got: %v, want %v", tt.err, got, tt.want) } } } func TestWithMessageNil(t *testing.T) { got := WithMessage(nil, "no error") if got != nil { t.Errorf("WithMessage(nil, \"no error\"): got %#v, expected nil", got) } } func TestWithMessage(t *testing.T) { tests := []struct { err error message string want string }{ {io.EOF, "read error", "read error: EOF"}, {WithMessage(io.EOF, "read error"), "client error", "client error: read error: EOF"}, } for _, tt := range tests { got := WithMessage(tt.err, tt.message).Error() if got != tt.want { t.Errorf("WithMessage(%v, %q): got: %q, want %q", tt.err, tt.message, got, tt.want) } } } func TestWithMessagefNil(t *testing.T) { got := WithMessagef(nil, "no error") if got != nil { t.Errorf("WithMessage(nil, \"no error\"): got %#v, expected nil", got) } } func TestWithMessagef(t *testing.T) { tests 
:= []struct { err error message string want string }{ {io.EOF, "read error", "read error: EOF"}, {WithMessagef(io.EOF, "read error without format specifier"), "client error", "client error: read error without format specifier: EOF"}, {WithMessagef(io.EOF, "read error with %d format specifier", 1), "client error", "client error: read error with 1 format specifier: EOF"}, } for _, tt := range tests { got := WithMessagef(tt.err, tt.message).Error() if got != tt.want { t.Errorf("WithMessage(%v, %q): got: %q, want %q", tt.err, tt.message, got, tt.want) } } } // errors.New, etc values are not expected to be compared by value // but the change in errors#27 made them incomparable. Assert that // various kinds of errors have a functional equality operator, even // if the result of that equality is always false. func TestErrorEquality(t *testing.T) { vals := []error{ nil, io.EOF, errors.New("EOF"), New("EOF"), Errorf("EOF"), Wrap(io.EOF, "EOF"), Wrapf(io.EOF, "EOF%d", 2), WithMessage(nil, "whoops"), WithMessage(io.EOF, "whoops"), WithStack(io.EOF), WithStack(nil), } for i := range vals { for j := range vals { _ = vals[i] == vals[j] // mustn't panic } } } dependencies/pkg/mod/github.com/pkg/errors@v0.9.1/format_test.go0000644000000000000000000003206415024302466023437 0ustar rootrootpackage errors import ( "errors" "fmt" "io" "regexp" "strings" "testing" ) func TestFormatNew(t *testing.T) { tests := []struct { error format string want string }{{ New("error"), "%s", "error", }, { New("error"), "%v", "error", }, { New("error"), "%+v", "error\n" + "github.com/pkg/errors.TestFormatNew\n" + "\t.+/github.com/pkg/errors/format_test.go:26", }, { New("error"), "%q", `"error"`, }} for i, tt := range tests { testFormatRegexp(t, i, tt.error, tt.format, tt.want) } } func TestFormatErrorf(t *testing.T) { tests := []struct { error format string want string }{{ Errorf("%s", "error"), "%s", "error", }, { Errorf("%s", "error"), "%v", "error", }, { Errorf("%s", "error"), "%+v", "error\n" + "github.com/pkg/errors.TestFormatErrorf\n" + "\t.+/github.com/pkg/errors/format_test.go:56", }} for i, tt := range tests { testFormatRegexp(t, i, tt.error, tt.format, tt.want) } } func TestFormatWrap(t *testing.T) { tests := []struct { error format string want string }{{ Wrap(New("error"), "error2"), "%s", "error2: error", }, { Wrap(New("error"), "error2"), "%v", "error2: error", }, { Wrap(New("error"), "error2"), "%+v", "error\n" + "github.com/pkg/errors.TestFormatWrap\n" + "\t.+/github.com/pkg/errors/format_test.go:82", }, { Wrap(io.EOF, "error"), "%s", "error: EOF", }, { Wrap(io.EOF, "error"), "%v", "error: EOF", }, { Wrap(io.EOF, "error"), "%+v", "EOF\n" + "error\n" + "github.com/pkg/errors.TestFormatWrap\n" + "\t.+/github.com/pkg/errors/format_test.go:96", }, { Wrap(Wrap(io.EOF, "error1"), "error2"), "%+v", "EOF\n" + "error1\n" + "github.com/pkg/errors.TestFormatWrap\n" + "\t.+/github.com/pkg/errors/format_test.go:103\n", }, { Wrap(New("error with space"), "context"), "%q", `"context: error with space"`, }} for i, tt := range tests { testFormatRegexp(t, i, tt.error, tt.format, tt.want) } } func TestFormatWrapf(t *testing.T) { tests := []struct { error format string want string }{{ Wrapf(io.EOF, "error%d", 2), "%s", "error2: EOF", }, { Wrapf(io.EOF, "error%d", 2), "%v", "error2: EOF", }, { Wrapf(io.EOF, "error%d", 2), "%+v", "EOF\n" + "error2\n" + "github.com/pkg/errors.TestFormatWrapf\n" + "\t.+/github.com/pkg/errors/format_test.go:134", }, { Wrapf(New("error"), "error%d", 2), "%s", "error2: error", }, { Wrapf(New("error"), 
"error%d", 2), "%v", "error2: error", }, { Wrapf(New("error"), "error%d", 2), "%+v", "error\n" + "github.com/pkg/errors.TestFormatWrapf\n" + "\t.+/github.com/pkg/errors/format_test.go:149", }} for i, tt := range tests { testFormatRegexp(t, i, tt.error, tt.format, tt.want) } } func TestFormatWithStack(t *testing.T) { tests := []struct { error format string want []string }{{ WithStack(io.EOF), "%s", []string{"EOF"}, }, { WithStack(io.EOF), "%v", []string{"EOF"}, }, { WithStack(io.EOF), "%+v", []string{"EOF", "github.com/pkg/errors.TestFormatWithStack\n" + "\t.+/github.com/pkg/errors/format_test.go:175"}, }, { WithStack(New("error")), "%s", []string{"error"}, }, { WithStack(New("error")), "%v", []string{"error"}, }, { WithStack(New("error")), "%+v", []string{"error", "github.com/pkg/errors.TestFormatWithStack\n" + "\t.+/github.com/pkg/errors/format_test.go:189", "github.com/pkg/errors.TestFormatWithStack\n" + "\t.+/github.com/pkg/errors/format_test.go:189"}, }, { WithStack(WithStack(io.EOF)), "%+v", []string{"EOF", "github.com/pkg/errors.TestFormatWithStack\n" + "\t.+/github.com/pkg/errors/format_test.go:197", "github.com/pkg/errors.TestFormatWithStack\n" + "\t.+/github.com/pkg/errors/format_test.go:197"}, }, { WithStack(WithStack(Wrapf(io.EOF, "message"))), "%+v", []string{"EOF", "message", "github.com/pkg/errors.TestFormatWithStack\n" + "\t.+/github.com/pkg/errors/format_test.go:205", "github.com/pkg/errors.TestFormatWithStack\n" + "\t.+/github.com/pkg/errors/format_test.go:205", "github.com/pkg/errors.TestFormatWithStack\n" + "\t.+/github.com/pkg/errors/format_test.go:205"}, }, { WithStack(Errorf("error%d", 1)), "%+v", []string{"error1", "github.com/pkg/errors.TestFormatWithStack\n" + "\t.+/github.com/pkg/errors/format_test.go:216", "github.com/pkg/errors.TestFormatWithStack\n" + "\t.+/github.com/pkg/errors/format_test.go:216"}, }} for i, tt := range tests { testFormatCompleteCompare(t, i, tt.error, tt.format, tt.want, true) } } func TestFormatWithMessage(t *testing.T) { tests := []struct { error format string want []string }{{ WithMessage(New("error"), "error2"), "%s", []string{"error2: error"}, }, { WithMessage(New("error"), "error2"), "%v", []string{"error2: error"}, }, { WithMessage(New("error"), "error2"), "%+v", []string{ "error", "github.com/pkg/errors.TestFormatWithMessage\n" + "\t.+/github.com/pkg/errors/format_test.go:244", "error2"}, }, { WithMessage(io.EOF, "addition1"), "%s", []string{"addition1: EOF"}, }, { WithMessage(io.EOF, "addition1"), "%v", []string{"addition1: EOF"}, }, { WithMessage(io.EOF, "addition1"), "%+v", []string{"EOF", "addition1"}, }, { WithMessage(WithMessage(io.EOF, "addition1"), "addition2"), "%v", []string{"addition2: addition1: EOF"}, }, { WithMessage(WithMessage(io.EOF, "addition1"), "addition2"), "%+v", []string{"EOF", "addition1", "addition2"}, }, { Wrap(WithMessage(io.EOF, "error1"), "error2"), "%+v", []string{"EOF", "error1", "error2", "github.com/pkg/errors.TestFormatWithMessage\n" + "\t.+/github.com/pkg/errors/format_test.go:272"}, }, { WithMessage(Errorf("error%d", 1), "error2"), "%+v", []string{"error1", "github.com/pkg/errors.TestFormatWithMessage\n" + "\t.+/github.com/pkg/errors/format_test.go:278", "error2"}, }, { WithMessage(WithStack(io.EOF), "error"), "%+v", []string{ "EOF", "github.com/pkg/errors.TestFormatWithMessage\n" + "\t.+/github.com/pkg/errors/format_test.go:285", "error"}, }, { WithMessage(Wrap(WithStack(io.EOF), "inside-error"), "outside-error"), "%+v", []string{ "EOF", "github.com/pkg/errors.TestFormatWithMessage\n" + 
"\t.+/github.com/pkg/errors/format_test.go:293", "inside-error", "github.com/pkg/errors.TestFormatWithMessage\n" + "\t.+/github.com/pkg/errors/format_test.go:293", "outside-error"}, }} for i, tt := range tests { testFormatCompleteCompare(t, i, tt.error, tt.format, tt.want, true) } } func TestFormatGeneric(t *testing.T) { starts := []struct { err error want []string }{ {New("new-error"), []string{ "new-error", "github.com/pkg/errors.TestFormatGeneric\n" + "\t.+/github.com/pkg/errors/format_test.go:315"}, }, {Errorf("errorf-error"), []string{ "errorf-error", "github.com/pkg/errors.TestFormatGeneric\n" + "\t.+/github.com/pkg/errors/format_test.go:319"}, }, {errors.New("errors-new-error"), []string{ "errors-new-error"}, }, } wrappers := []wrapper{ { func(err error) error { return WithMessage(err, "with-message") }, []string{"with-message"}, }, { func(err error) error { return WithStack(err) }, []string{ "github.com/pkg/errors.(func·002|TestFormatGeneric.func2)\n\t" + ".+/github.com/pkg/errors/format_test.go:333", }, }, { func(err error) error { return Wrap(err, "wrap-error") }, []string{ "wrap-error", "github.com/pkg/errors.(func·003|TestFormatGeneric.func3)\n\t" + ".+/github.com/pkg/errors/format_test.go:339", }, }, { func(err error) error { return Wrapf(err, "wrapf-error%d", 1) }, []string{ "wrapf-error1", "github.com/pkg/errors.(func·004|TestFormatGeneric.func4)\n\t" + ".+/github.com/pkg/errors/format_test.go:346", }, }, } for s := range starts { err := starts[s].err want := starts[s].want testFormatCompleteCompare(t, s, err, "%+v", want, false) testGenericRecursive(t, err, want, wrappers, 3) } } func wrappedNew(message string) error { // This function will be mid-stack inlined in go 1.12+ return New(message) } func TestFormatWrappedNew(t *testing.T) { tests := []struct { error format string want string }{{ wrappedNew("error"), "%+v", "error\n" + "github.com/pkg/errors.wrappedNew\n" + "\t.+/github.com/pkg/errors/format_test.go:364\n" + "github.com/pkg/errors.TestFormatWrappedNew\n" + "\t.+/github.com/pkg/errors/format_test.go:373", }} for i, tt := range tests { testFormatRegexp(t, i, tt.error, tt.format, tt.want) } } func testFormatRegexp(t *testing.T, n int, arg interface{}, format, want string) { t.Helper() got := fmt.Sprintf(format, arg) gotLines := strings.SplitN(got, "\n", -1) wantLines := strings.SplitN(want, "\n", -1) if len(wantLines) > len(gotLines) { t.Errorf("test %d: wantLines(%d) > gotLines(%d):\n got: %q\nwant: %q", n+1, len(wantLines), len(gotLines), got, want) return } for i, w := range wantLines { match, err := regexp.MatchString(w, gotLines[i]) if err != nil { t.Fatal(err) } if !match { t.Errorf("test %d: line %d: fmt.Sprintf(%q, err):\n got: %q\nwant: %q", n+1, i+1, format, got, want) } } } var stackLineR = regexp.MustCompile(`\.`) // parseBlocks parses input into a slice, where: // - incase entry contains a newline, its a stacktrace // - incase entry contains no newline, its a solo line. // // Detecting stack boundaries only works incase the WithStack-calls are // to be found on the same line, thats why it is optionally here. 
// // Example use: // // for _, e := range blocks { // if strings.ContainsAny(e, "\n") { // // Match as stack // } else { // // Match as line // } // } // func parseBlocks(input string, detectStackboundaries bool) ([]string, error) { var blocks []string stack := "" wasStack := false lines := map[string]bool{} // already found lines for _, l := range strings.Split(input, "\n") { isStackLine := stackLineR.MatchString(l) switch { case !isStackLine && wasStack: blocks = append(blocks, stack, l) stack = "" lines = map[string]bool{} case isStackLine: if wasStack { // Detecting two stacks after another, possible cause lines match in // our tests due to WithStack(WithStack(io.EOF)) on same line. if detectStackboundaries { if lines[l] { if len(stack) == 0 { return nil, errors.New("len of block must not be zero here") } blocks = append(blocks, stack) stack = l lines = map[string]bool{l: true} continue } } stack = stack + "\n" + l } else { stack = l } lines[l] = true case !isStackLine && !wasStack: blocks = append(blocks, l) default: return nil, errors.New("must not happen") } wasStack = isStackLine } // Use up stack if stack != "" { blocks = append(blocks, stack) } return blocks, nil } func testFormatCompleteCompare(t *testing.T, n int, arg interface{}, format string, want []string, detectStackBoundaries bool) { gotStr := fmt.Sprintf(format, arg) got, err := parseBlocks(gotStr, detectStackBoundaries) if err != nil { t.Fatal(err) } if len(got) != len(want) { t.Fatalf("test %d: fmt.Sprintf(%s, err) -> wrong number of blocks: got(%d) want(%d)\n got: %s\nwant: %s\ngotStr: %q", n+1, format, len(got), len(want), prettyBlocks(got), prettyBlocks(want), gotStr) } for i := range got { if strings.ContainsAny(want[i], "\n") { // Match as stack match, err := regexp.MatchString(want[i], got[i]) if err != nil { t.Fatal(err) } if !match { t.Fatalf("test %d: block %d: fmt.Sprintf(%q, err):\ngot:\n%q\nwant:\n%q\nall-got:\n%s\nall-want:\n%s\n", n+1, i+1, format, got[i], want[i], prettyBlocks(got), prettyBlocks(want)) } } else { // Match as message if got[i] != want[i] { t.Fatalf("test %d: fmt.Sprintf(%s, err) at block %d got != want:\n got: %q\nwant: %q", n+1, format, i+1, got[i], want[i]) } } } } type wrapper struct { wrap func(err error) error want []string } func prettyBlocks(blocks []string) string { var out []string for _, b := range blocks { out = append(out, fmt.Sprintf("%v", b)) } return " " + strings.Join(out, "\n ") } func testGenericRecursive(t *testing.T, beforeErr error, beforeWant []string, list []wrapper, maxDepth int) { if len(beforeWant) == 0 { panic("beforeWant must not be empty") } for _, w := range list { if len(w.want) == 0 { panic("want must not be empty") } err := w.wrap(beforeErr) // Copy required cause append(beforeWant, ..) modified beforeWant subtly. beforeCopy := make([]string, len(beforeWant)) copy(beforeCopy, beforeWant) beforeWant := beforeCopy last := len(beforeWant) - 1 var want []string // Merge two stacks behind each other. if strings.ContainsAny(beforeWant[last], "\n") && strings.ContainsAny(w.want[0], "\n") { want = append(beforeWant[:last], append([]string{beforeWant[last] + "((?s).*)" + w.want[0]}, w.want[1:]...)...) } else { want = append(beforeWant, w.want...) 
} testFormatCompleteCompare(t, maxDepth, err, "%+v", want, false) if maxDepth > 0 { testGenericRecursive(t, err, want, list, maxDepth-1) } } } dependencies/pkg/mod/github.com/pkg/errors@v0.9.1/.gitignore0000644000000000000000000000041215024302466022541 0ustar rootroot# Compiled Object files, Static and Dynamic libs (Shared Objects) *.o *.a *.so # Folders _obj _test # Architecture specific extensions/prefixes *.[568vq] [568vq].out *.cgo1.go *.cgo2.c _cgo_defun.c _cgo_gotypes.go _cgo_export.* _testmain.go *.exe *.test *.prof dependencies/pkg/mod/github.com/pkg/errors@v0.9.1/errors.go0000644000000000000000000001641715024302466022430 0ustar rootroot// Package errors provides simple error handling primitives. // // The traditional error handling idiom in Go is roughly akin to // // if err != nil { // return err // } // // which when applied recursively up the call stack results in error reports // without context or debugging information. The errors package allows // programmers to add context to the failure path in their code in a way // that does not destroy the original value of the error. // // Adding context to an error // // The errors.Wrap function returns a new error that adds context to the // original error by recording a stack trace at the point Wrap is called, // together with the supplied message. For example // // _, err := ioutil.ReadAll(r) // if err != nil { // return errors.Wrap(err, "read failed") // } // // If additional control is required, the errors.WithStack and // errors.WithMessage functions destructure errors.Wrap into its component // operations: annotating an error with a stack trace and with a message, // respectively. // // Retrieving the cause of an error // // Using errors.Wrap constructs a stack of errors, adding context to the // preceding error. Depending on the nature of the error it may be necessary // to reverse the operation of errors.Wrap to retrieve the original error // for inspection. Any error value which implements this interface // // type causer interface { // Cause() error // } // // can be inspected by errors.Cause. errors.Cause will recursively retrieve // the topmost error that does not implement causer, which is assumed to be // the original cause. For example: // // switch err := errors.Cause(err).(type) { // case *MyError: // // handle specifically // default: // // unknown error // } // // Although the causer interface is not exported by this package, it is // considered a part of its stable public interface. // // Formatted printing of errors // // All error values returned from this package implement fmt.Formatter and can // be formatted by the fmt package. The following verbs are supported: // // %s print the error. If the error has a Cause it will be // printed recursively. // %v see %s // %+v extended format. Each Frame of the error's StackTrace will // be printed in detail. // // Retrieving the stack trace of an error or wrapper // // New, Errorf, Wrap, and Wrapf record a stack trace at the point they are // invoked. This information can be retrieved with the following interface: // // type stackTracer interface { // StackTrace() errors.StackTrace // } // // The returned errors.StackTrace type is defined as // // type StackTrace []Frame // // The Frame type represents a call site in the stack trace. Frame supports // the fmt.Formatter interface that can be used for printing information about // the stack trace of this error. 
For example: // // if err, ok := err.(stackTracer); ok { // for _, f := range err.StackTrace() { // fmt.Printf("%+s:%d\n", f, f) // } // } // // Although the stackTracer interface is not exported by this package, it is // considered a part of its stable public interface. // // See the documentation for Frame.Format for more details. package errors import ( "fmt" "io" ) // New returns an error with the supplied message. // New also records the stack trace at the point it was called. func New(message string) error { return &fundamental{ msg: message, stack: callers(), } } // Errorf formats according to a format specifier and returns the string // as a value that satisfies error. // Errorf also records the stack trace at the point it was called. func Errorf(format string, args ...interface{}) error { return &fundamental{ msg: fmt.Sprintf(format, args...), stack: callers(), } } // fundamental is an error that has a message and a stack, but no caller. type fundamental struct { msg string *stack } func (f *fundamental) Error() string { return f.msg } func (f *fundamental) Format(s fmt.State, verb rune) { switch verb { case 'v': if s.Flag('+') { io.WriteString(s, f.msg) f.stack.Format(s, verb) return } fallthrough case 's': io.WriteString(s, f.msg) case 'q': fmt.Fprintf(s, "%q", f.msg) } } // WithStack annotates err with a stack trace at the point WithStack was called. // If err is nil, WithStack returns nil. func WithStack(err error) error { if err == nil { return nil } return &withStack{ err, callers(), } } type withStack struct { error *stack } func (w *withStack) Cause() error { return w.error } // Unwrap provides compatibility for Go 1.13 error chains. func (w *withStack) Unwrap() error { return w.error } func (w *withStack) Format(s fmt.State, verb rune) { switch verb { case 'v': if s.Flag('+') { fmt.Fprintf(s, "%+v", w.Cause()) w.stack.Format(s, verb) return } fallthrough case 's': io.WriteString(s, w.Error()) case 'q': fmt.Fprintf(s, "%q", w.Error()) } } // Wrap returns an error annotating err with a stack trace // at the point Wrap is called, and the supplied message. // If err is nil, Wrap returns nil. func Wrap(err error, message string) error { if err == nil { return nil } err = &withMessage{ cause: err, msg: message, } return &withStack{ err, callers(), } } // Wrapf returns an error annotating err with a stack trace // at the point Wrapf is called, and the format specifier. // If err is nil, Wrapf returns nil. func Wrapf(err error, format string, args ...interface{}) error { if err == nil { return nil } err = &withMessage{ cause: err, msg: fmt.Sprintf(format, args...), } return &withStack{ err, callers(), } } // WithMessage annotates err with a new message. // If err is nil, WithMessage returns nil. func WithMessage(err error, message string) error { if err == nil { return nil } return &withMessage{ cause: err, msg: message, } } // WithMessagef annotates err with the format specifier. // If err is nil, WithMessagef returns nil. func WithMessagef(err error, format string, args ...interface{}) error { if err == nil { return nil } return &withMessage{ cause: err, msg: fmt.Sprintf(format, args...), } } type withMessage struct { cause error msg string } func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } func (w *withMessage) Cause() error { return w.cause } // Unwrap provides compatibility for Go 1.13 error chains. 
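//
// A minimal sketch, assuming Go 1.13+ and the standard library "errors"
// package imported under the hypothetical alias stderrors, of what Unwrap
// enables for errors produced by this package:
//
//	err := Wrap(io.EOF, "read failed") // withStack wrapping withMessage wrapping io.EOF
//	stderrors.Is(err, io.EOF)          // true: Is follows the Unwrap chain to io.EOF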
func (w *withMessage) Unwrap() error { return w.cause } func (w *withMessage) Format(s fmt.State, verb rune) { switch verb { case 'v': if s.Flag('+') { fmt.Fprintf(s, "%+v\n", w.Cause()) io.WriteString(s, w.msg) return } fallthrough case 's', 'q': io.WriteString(s, w.Error()) } } // Cause returns the underlying cause of the error, if possible. // An error value has a cause if it implements the following // interface: // // type causer interface { // Cause() error // } // // If the error does not implement Cause, the original error will // be returned. If the error is nil, nil will be returned without further // investigation. func Cause(err error) error { type causer interface { Cause() error } for err != nil { cause, ok := err.(causer) if !ok { break } err = cause.Cause() } return err } dependencies/pkg/mod/github.com/pkg/errors@v0.9.1/stack_test.go0000644000000000000000000001131515024302466023250 0ustar rootrootpackage errors import ( "fmt" "runtime" "testing" ) var initpc = caller() type X struct{} // val returns a Frame pointing to itself. func (x X) val() Frame { return caller() } // ptr returns a Frame pointing to itself. func (x *X) ptr() Frame { return caller() } func TestFrameFormat(t *testing.T) { var tests = []struct { Frame format string want string }{{ initpc, "%s", "stack_test.go", }, { initpc, "%+s", "github.com/pkg/errors.init\n" + "\t.+/github.com/pkg/errors/stack_test.go", }, { 0, "%s", "unknown", }, { 0, "%+s", "unknown", }, { initpc, "%d", "9", }, { 0, "%d", "0", }, { initpc, "%n", "init", }, { func() Frame { var x X return x.ptr() }(), "%n", `\(\*X\).ptr`, }, { func() Frame { var x X return x.val() }(), "%n", "X.val", }, { 0, "%n", "", }, { initpc, "%v", "stack_test.go:9", }, { initpc, "%+v", "github.com/pkg/errors.init\n" + "\t.+/github.com/pkg/errors/stack_test.go:9", }, { 0, "%v", "unknown:0", }} for i, tt := range tests { testFormatRegexp(t, i, tt.Frame, tt.format, tt.want) } } func TestFuncname(t *testing.T) { tests := []struct { name, want string }{ {"", ""}, {"runtime.main", "main"}, {"github.com/pkg/errors.funcname", "funcname"}, {"funcname", "funcname"}, {"io.copyBuffer", "copyBuffer"}, {"main.(*R).Write", "(*R).Write"}, } for _, tt := range tests { got := funcname(tt.name) want := tt.want if got != want { t.Errorf("funcname(%q): want: %q, got %q", tt.name, want, got) } } } func TestStackTrace(t *testing.T) { tests := []struct { err error want []string }{{ New("ooh"), []string{ "github.com/pkg/errors.TestStackTrace\n" + "\t.+/github.com/pkg/errors/stack_test.go:121", }, }, { Wrap(New("ooh"), "ahh"), []string{ "github.com/pkg/errors.TestStackTrace\n" + "\t.+/github.com/pkg/errors/stack_test.go:126", // this is the stack of Wrap, not New }, }, { Cause(Wrap(New("ooh"), "ahh")), []string{ "github.com/pkg/errors.TestStackTrace\n" + "\t.+/github.com/pkg/errors/stack_test.go:131", // this is the stack of New }, }, { func() error { return New("ooh") }(), []string{ `github.com/pkg/errors.TestStackTrace.func1` + "\n\t.+/github.com/pkg/errors/stack_test.go:136", // this is the stack of New "github.com/pkg/errors.TestStackTrace\n" + "\t.+/github.com/pkg/errors/stack_test.go:136", // this is the stack of New's caller }, }, { Cause(func() error { return func() error { return Errorf("hello %s", fmt.Sprintf("world: %s", "ooh")) }() }()), []string{ `github.com/pkg/errors.TestStackTrace.func2.1` + "\n\t.+/github.com/pkg/errors/stack_test.go:145", // this is the stack of Errorf `github.com/pkg/errors.TestStackTrace.func2` + "\n\t.+/github.com/pkg/errors/stack_test.go:146", // 
this is the stack of Errorf's caller "github.com/pkg/errors.TestStackTrace\n" + "\t.+/github.com/pkg/errors/stack_test.go:147", // this is the stack of Errorf's caller's caller }, }} for i, tt := range tests { x, ok := tt.err.(interface { StackTrace() StackTrace }) if !ok { t.Errorf("expected %#v to implement StackTrace() StackTrace", tt.err) continue } st := x.StackTrace() for j, want := range tt.want { testFormatRegexp(t, i, st[j], "%+v", want) } } } func stackTrace() StackTrace { const depth = 8 var pcs [depth]uintptr n := runtime.Callers(1, pcs[:]) var st stack = pcs[0:n] return st.StackTrace() } func TestStackTraceFormat(t *testing.T) { tests := []struct { StackTrace format string want string }{{ nil, "%s", `\[\]`, }, { nil, "%v", `\[\]`, }, { nil, "%+v", "", }, { nil, "%#v", `\[\]errors.Frame\(nil\)`, }, { make(StackTrace, 0), "%s", `\[\]`, }, { make(StackTrace, 0), "%v", `\[\]`, }, { make(StackTrace, 0), "%+v", "", }, { make(StackTrace, 0), "%#v", `\[\]errors.Frame{}`, }, { stackTrace()[:2], "%s", `\[stack_test.go stack_test.go\]`, }, { stackTrace()[:2], "%v", `\[stack_test.go:174 stack_test.go:221\]`, }, { stackTrace()[:2], "%+v", "\n" + "github.com/pkg/errors.stackTrace\n" + "\t.+/github.com/pkg/errors/stack_test.go:174\n" + "github.com/pkg/errors.TestStackTraceFormat\n" + "\t.+/github.com/pkg/errors/stack_test.go:225", }, { stackTrace()[:2], "%#v", `\[\]errors.Frame{stack_test.go:174, stack_test.go:233}`, }} for i, tt := range tests { testFormatRegexp(t, i, tt.StackTrace, tt.format, tt.want) } } // a version of runtime.Caller that returns a Frame, not a uintptr. func caller() Frame { var pcs [3]uintptr n := runtime.Callers(2, pcs[:]) frames := runtime.CallersFrames(pcs[:n]) frame, _ := frames.Next() return Frame(frame.PC) } dependencies/pkg/mod/github.com/redis/0000775000000000000000000000000015024302467016714 5ustar rootrootdependencies/pkg/mod/github.com/redis/go-redis/0000775000000000000000000000000015024302467020425 5ustar rootrootdependencies/pkg/mod/github.com/redis/go-redis/v9@v9.10.0/0000755000000000000000000000000015024302467021715 5ustar rootrootdependencies/pkg/mod/github.com/redis/go-redis/v9@v9.10.0/command_recorder_test.go0000644000000000000000000000376415024302467026620 0ustar rootrootpackage redis_test import ( "context" "strings" "sync" "github.com/redis/go-redis/v9" ) // commandRecorder records the last N commands executed by a Redis client. type commandRecorder struct { mu sync.Mutex commands []string maxSize int } // newCommandRecorder creates a new command recorder with the specified maximum size. func newCommandRecorder(maxSize int) *commandRecorder { return &commandRecorder{ commands: make([]string, 0, maxSize), maxSize: maxSize, } } // Record adds a command to the recorder. func (r *commandRecorder) Record(cmd string) { cmd = strings.ToLower(cmd) r.mu.Lock() defer r.mu.Unlock() r.commands = append(r.commands, cmd) if len(r.commands) > r.maxSize { r.commands = r.commands[1:] } } // LastCommands returns a copy of the recorded commands. func (r *commandRecorder) LastCommands() []string { r.mu.Lock() defer r.mu.Unlock() return append([]string(nil), r.commands...) } // Contains checks if the recorder contains a specific command. func (r *commandRecorder) Contains(cmd string) bool { cmd = strings.ToLower(cmd) r.mu.Lock() defer r.mu.Unlock() for _, c := range r.commands { if strings.Contains(c, cmd) { return true } } return false } // Hook returns a Redis hook that records commands. 
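//
// A minimal usage sketch, assuming a *redis.Client named rdb and a
// context.Context named ctx created elsewhere in the test suite:
//
//	rec := newCommandRecorder(10)
//	rdb.AddHook(rec.Hook())  // register the recording hook on the client
//	rdb.Ping(ctx)            // every processed command passes through the hook
//	_ = rec.Contains("ping") // true once PING has been recorded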
func (r *commandRecorder) Hook() redis.Hook { return &commandHook{recorder: r} } // commandHook implements the redis.Hook interface to record commands. type commandHook struct { recorder *commandRecorder } func (h *commandHook) DialHook(next redis.DialHook) redis.DialHook { return next } func (h *commandHook) ProcessHook(next redis.ProcessHook) redis.ProcessHook { return func(ctx context.Context, cmd redis.Cmder) error { h.recorder.Record(cmd.String()) return next(ctx, cmd) } } func (h *commandHook) ProcessPipelineHook(next redis.ProcessPipelineHook) redis.ProcessPipelineHook { return func(ctx context.Context, cmds []redis.Cmder) error { for _, cmd := range cmds { h.recorder.Record(cmd.String()) } return next(ctx, cmds) } } dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.10.0/docker-compose.yml0000644000000000000000000000527715024302467025365 0ustar rootroot--- services: redis: image: ${CLIENT_LIBS_TEST_IMAGE:-redislabs/client-libs-test:rs-7.4.0-v2} platform: linux/amd64 container_name: redis-standalone environment: - TLS_ENABLED=yes - REDIS_CLUSTER=no - PORT=6379 - TLS_PORT=6666 command: ${REDIS_EXTRA_ARGS:---enable-debug-command yes --enable-module-command yes --tls-auth-clients optional --save ""} ports: - 6379:6379 - 6666:6666 # TLS port volumes: - "./dockers/standalone:/redis/work" profiles: - standalone - sentinel - all-stack - all osscluster: image: ${CLIENT_LIBS_TEST_IMAGE:-redislabs/client-libs-test:rs-7.4.0-v2} platform: linux/amd64 container_name: redis-osscluster environment: - NODES=6 - PORT=16600 command: "--cluster-enabled yes" ports: - "16600-16605:16600-16605" volumes: - "./dockers/osscluster:/redis/work" profiles: - cluster - all-stack - all sentinel-cluster: image: ${CLIENT_LIBS_TEST_IMAGE:-redislabs/client-libs-test:rs-7.4.0-v2} platform: linux/amd64 container_name: redis-sentinel-cluster network_mode: "host" environment: - NODES=3 - TLS_ENABLED=yes - REDIS_CLUSTER=no - PORT=9121 command: ${REDIS_EXTRA_ARGS:---enable-debug-command yes --enable-module-command yes --tls-auth-clients optional --save ""} #ports: # - "9121-9123:9121-9123" volumes: - "./dockers/sentinel-cluster:/redis/work" profiles: - sentinel - all-stack - all sentinel: image: ${CLIENT_LIBS_TEST_IMAGE:-redislabs/client-libs-test:rs-7.4.0-v2} platform: linux/amd64 container_name: redis-sentinel depends_on: - sentinel-cluster environment: - NODES=3 - REDIS_CLUSTER=no - PORT=26379 command: ${REDIS_EXTRA_ARGS:---sentinel} network_mode: "host" #ports: # - 26379:26379 # - 26380:26380 # - 26381:26381 volumes: - "./dockers/sentinel.conf:/redis/config-default/redis.conf" - "./dockers/sentinel:/redis/work" profiles: - sentinel - all-stack - all ring-cluster: image: ${CLIENT_LIBS_TEST_IMAGE:-redislabs/client-libs-test:rs-7.4.0-v2} platform: linux/amd64 container_name: redis-ring-cluster environment: - NODES=3 - TLS_ENABLED=yes - REDIS_CLUSTER=no - PORT=6390 command: ${REDIS_EXTRA_ARGS:---enable-debug-command yes --enable-module-command yes --tls-auth-clients optional --save ""} ports: - 6390:6390 - 6391:6391 - 6392:6392 volumes: - "./dockers/ring:/redis/work" profiles: - ring - cluster - all-stack - all dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.10.0/internal_test.go0000644000000000000000000002051715024302467025124 0ustar rootrootpackage redis import ( "context" "fmt" "reflect" "sync" "sync/atomic" "testing" "time" "github.com/redis/go-redis/v9/internal/pool" "github.com/redis/go-redis/v9/internal/proto" . "github.com/bsm/ginkgo/v2" . 
"github.com/bsm/gomega" ) var _ = Describe("newClusterState", func() { var state *clusterState createClusterState := func(slots []ClusterSlot) *clusterState { opt := &ClusterOptions{} opt.init() nodes := newClusterNodes(opt) state, err := newClusterState(nodes, slots, "10.10.10.10:1234") Expect(err).NotTo(HaveOccurred()) return state } Describe("sorting", func() { BeforeEach(func() { state = createClusterState([]ClusterSlot{{ Start: 1000, End: 1999, }, { Start: 0, End: 999, }, { Start: 2000, End: 2999, }}) }) It("sorts slots", func() { Expect(state.slots).To(Equal([]*clusterSlot{ {start: 0, end: 999, nodes: nil}, {start: 1000, end: 1999, nodes: nil}, {start: 2000, end: 2999, nodes: nil}, })) }) }) Describe("loopback", func() { BeforeEach(func() { state = createClusterState([]ClusterSlot{{ Nodes: []ClusterNode{{Addr: "127.0.0.1:7001"}}, }, { Nodes: []ClusterNode{{Addr: "127.0.0.1:7002"}}, }, { Nodes: []ClusterNode{{Addr: "1.2.3.4:1234"}}, }, { Nodes: []ClusterNode{{Addr: ":1234"}}, }}) }) It("replaces loopback hosts in addresses", func() { slotAddr := func(slot *clusterSlot) string { return slot.nodes[0].Client.Options().Addr } Expect(slotAddr(state.slots[0])).To(Equal("10.10.10.10:7001")) Expect(slotAddr(state.slots[1])).To(Equal("10.10.10.10:7002")) Expect(slotAddr(state.slots[2])).To(Equal("1.2.3.4:1234")) Expect(slotAddr(state.slots[3])).To(Equal(":1234")) }) }) }) type fixedHash string func (h fixedHash) Get(string) string { return string(h) } func TestRingSetAddrsAndRebalanceRace(t *testing.T) { const ( ringShard1Name = "ringShardOne" ringShard2Name = "ringShardTwo" ringShard1Port = "6390" ringShard2Port = "6391" ) ring := NewRing(&RingOptions{ Addrs: map[string]string{ ringShard1Name: ":" + ringShard1Port, }, // Disable heartbeat HeartbeatFrequency: 1 * time.Hour, NewConsistentHash: func(shards []string) ConsistentHash { switch len(shards) { case 1: return fixedHash(ringShard1Name) case 2: return fixedHash(ringShard2Name) default: t.Fatalf("Unexpected number of shards: %v", shards) return nil } }, }) defer ring.Close() // Continuously update addresses by adding and removing one address updatesDone := make(chan struct{}) defer func() { close(updatesDone) }() go func() { for i := 0; ; i++ { select { case <-updatesDone: return default: if i%2 == 0 { ring.SetAddrs(map[string]string{ ringShard1Name: ":" + ringShard1Port, }) } else { ring.SetAddrs(map[string]string{ ringShard1Name: ":" + ringShard1Port, ringShard2Name: ":" + ringShard2Port, }) } } } }() timer := time.NewTimer(1 * time.Second) for running := true; running; { select { case <-timer.C: running = false default: shard, err := ring.sharding.GetByKey("whatever") if err == nil && shard == nil { t.Fatal("shard is nil") } } } } func BenchmarkRingShardingRebalanceLocked(b *testing.B) { opts := &RingOptions{ Addrs: make(map[string]string), // Disable heartbeat HeartbeatFrequency: 1 * time.Hour, } for i := 0; i < 100; i++ { opts.Addrs[fmt.Sprintf("shard%d", i)] = fmt.Sprintf(":63%02d", i) } ring := NewRing(opts) defer ring.Close() b.ResetTimer() for i := 0; i < b.N; i++ { ring.sharding.rebalanceLocked() } } type testCounter struct { mu sync.Mutex t *testing.T m map[string]int } func newTestCounter(t *testing.T) *testCounter { return &testCounter{t: t, m: make(map[string]int)} } func (ct *testCounter) increment(key string) { ct.mu.Lock() defer ct.mu.Unlock() ct.m[key]++ } func (ct *testCounter) expect(values map[string]int) { ct.mu.Lock() defer ct.mu.Unlock() ct.t.Helper() if !reflect.DeepEqual(values, ct.m) { ct.t.Errorf("expected %v != 
actual %v", values, ct.m) } } func TestRingShardsCleanup(t *testing.T) { const ( ringShard1Name = "ringShardOne" ringShard2Name = "ringShardTwo" ringShard1Addr = "shard1.test" ringShard2Addr = "shard2.test" ) t.Run("closes unused shards", func(t *testing.T) { closeCounter := newTestCounter(t) ring := NewRing(&RingOptions{ Addrs: map[string]string{ ringShard1Name: ringShard1Addr, ringShard2Name: ringShard2Addr, }, NewClient: func(opt *Options) *Client { c := NewClient(opt) c.baseClient.onClose = c.baseClient.wrappedOnClose(func() error { closeCounter.increment(opt.Addr) return nil }) return c }, }) closeCounter.expect(map[string]int{}) // no change due to the same addresses ring.SetAddrs(map[string]string{ ringShard1Name: ringShard1Addr, ringShard2Name: ringShard2Addr, }) closeCounter.expect(map[string]int{}) ring.SetAddrs(map[string]string{ ringShard1Name: ringShard1Addr, }) closeCounter.expect(map[string]int{ringShard2Addr: 1}) ring.SetAddrs(map[string]string{ ringShard2Name: ringShard2Addr, }) closeCounter.expect(map[string]int{ringShard1Addr: 1, ringShard2Addr: 1}) ring.Close() closeCounter.expect(map[string]int{ringShard1Addr: 1, ringShard2Addr: 2}) }) t.Run("closes created shards if ring was closed", func(t *testing.T) { createCounter := newTestCounter(t) closeCounter := newTestCounter(t) var ( ring *Ring shouldClose int32 ) ring = NewRing(&RingOptions{ Addrs: map[string]string{ ringShard1Name: ringShard1Addr, }, NewClient: func(opt *Options) *Client { if atomic.LoadInt32(&shouldClose) != 0 { ring.Close() } createCounter.increment(opt.Addr) c := NewClient(opt) c.baseClient.onClose = c.baseClient.wrappedOnClose(func() error { closeCounter.increment(opt.Addr) return nil }) return c }, }) createCounter.expect(map[string]int{ringShard1Addr: 1}) closeCounter.expect(map[string]int{}) atomic.StoreInt32(&shouldClose, 1) ring.SetAddrs(map[string]string{ ringShard2Name: ringShard2Addr, }) createCounter.expect(map[string]int{ringShard1Addr: 1, ringShard2Addr: 1}) closeCounter.expect(map[string]int{ringShard1Addr: 1, ringShard2Addr: 1}) }) } //------------------------------------------------------------------------------ type timeoutErr struct { error } func (e timeoutErr) Timeout() bool { return true } func (e timeoutErr) Temporary() bool { return true } func (e timeoutErr) Error() string { return "i/o timeout" } var _ = Describe("withConn", func() { var client *Client BeforeEach(func() { client = NewClient(&Options{ PoolSize: 1, }) }) AfterEach(func() { client.Close() }) It("should replace the connection in the pool when there is no error", func() { var conn *pool.Conn client.withConn(ctx, func(ctx context.Context, c *pool.Conn) error { conn = c return nil }) newConn, err := client.connPool.Get(ctx) Expect(err).To(BeNil()) Expect(newConn).To(Equal(conn)) }) It("should replace the connection in the pool when there is an error not related to a bad connection", func() { var conn *pool.Conn client.withConn(ctx, func(ctx context.Context, c *pool.Conn) error { conn = c return proto.RedisError("LOADING") }) newConn, err := client.connPool.Get(ctx) Expect(err).To(BeNil()) Expect(newConn).To(Equal(conn)) }) It("should remove the connection from the pool when it times out", func() { var conn *pool.Conn client.withConn(ctx, func(ctx context.Context, c *pool.Conn) error { conn = c return timeoutErr{} }) newConn, err := client.connPool.Get(ctx) Expect(err).To(BeNil()) Expect(newConn).NotTo(Equal(conn)) Expect(client.connPool.Len()).To(Equal(1)) }) }) var _ = Describe("ClusterClient", func() { var client 
*ClusterClient BeforeEach(func() { client = &ClusterClient{} }) Describe("cmdSlot", func() { It("select slot from args for GETKEYSINSLOT command", func() { cmd := NewStringSliceCmd(ctx, "cluster", "getkeysinslot", 100, 200) slot := client.cmdSlot(cmd) Expect(slot).To(Equal(100)) }) It("select slot from args for COUNTKEYSINSLOT command", func() { cmd := NewStringSliceCmd(ctx, "cluster", "countkeysinslot", 100) slot := client.cmdSlot(cmd) Expect(slot).To(Equal(100)) }) }) }) dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.10.0/json_test.go0000644000000000000000000007406115024302467024264 0ustar rootrootpackage redis_test import ( "context" "encoding/json" "time" . "github.com/bsm/ginkgo/v2" . "github.com/bsm/gomega" "github.com/redis/go-redis/v9" ) type JSONGetTestStruct struct { Hello string `json:"hello"` } var _ = Describe("JSON Commands", Label("json"), func() { ctx := context.TODO() var client *redis.Client setupRedisClient := func(protocolVersion int) *redis.Client { return redis.NewClient(&redis.Options{ Addr: "localhost:6379", DB: 0, Protocol: protocolVersion, UnstableResp3: true, }) } AfterEach(func() { if client != nil { client.FlushDB(ctx) client.Close() } }) protocols := []int{2, 3} for _, protocol := range protocols { BeforeEach(func() { client = setupRedisClient(protocol) Expect(client.FlushAll(ctx).Err()).NotTo(HaveOccurred()) }) Describe("arrays", Label("arrays"), func() { It("should JSONArrAppend", Label("json.arrappend", "json"), func() { cmd1 := client.JSONSet(ctx, "append2", "$", `{"a": [10], "b": {"a": [12, 13]}}`) Expect(cmd1.Err()).NotTo(HaveOccurred()) Expect(cmd1.Val()).To(Equal("OK")) cmd2 := client.JSONArrAppend(ctx, "append2", "$..a", 10) Expect(cmd2.Err()).NotTo(HaveOccurred()) Expect(cmd2.Val()).To(Equal([]int64{2, 3})) }) It("should JSONArrIndex and JSONArrIndexWithArgs", Label("json.arrindex", "json"), func() { cmd1, err := client.JSONSet(ctx, "index1", "$", `{"a": [10], "b": {"a": [12, 10]}}`).Result() Expect(err).NotTo(HaveOccurred()) Expect(cmd1).To(Equal("OK")) cmd2, err := client.JSONArrIndex(ctx, "index1", "$.b.a", 10).Result() Expect(err).NotTo(HaveOccurred()) Expect(cmd2).To(Equal([]int64{1})) cmd3, err := client.JSONSet(ctx, "index2", "$", `[0,1,2,3,4]`).Result() Expect(err).NotTo(HaveOccurred()) Expect(cmd3).To(Equal("OK")) res, err := client.JSONArrIndex(ctx, "index2", "$", 1).Result() Expect(err).NotTo(HaveOccurred()) Expect(res[0]).To(Equal(int64(1))) res, err = client.JSONArrIndex(ctx, "index2", "$", 1, 2).Result() Expect(err).NotTo(HaveOccurred()) Expect(res[0]).To(Equal(int64(-1))) res, err = client.JSONArrIndex(ctx, "index2", "$", 4).Result() Expect(err).NotTo(HaveOccurred()) Expect(res[0]).To(Equal(int64(4))) res, err = client.JSONArrIndexWithArgs(ctx, "index2", "$", &redis.JSONArrIndexArgs{}, 4).Result() Expect(err).NotTo(HaveOccurred()) Expect(res[0]).To(Equal(int64(4))) stop := 5000 res, err = client.JSONArrIndexWithArgs(ctx, "index2", "$", &redis.JSONArrIndexArgs{Stop: &stop}, 4).Result() Expect(err).NotTo(HaveOccurred()) Expect(res[0]).To(Equal(int64(4))) stop = -1 res, err = client.JSONArrIndexWithArgs(ctx, "index2", "$", &redis.JSONArrIndexArgs{Stop: &stop}, 4).Result() Expect(err).NotTo(HaveOccurred()) Expect(res[0]).To(Equal(int64(-1))) }) It("should JSONArrIndex and JSONArrIndexWithArgs with $", Label("json.arrindex", "json"), func() { doc := `{ "store": { "book": [ { "category": "reference", "author": "Nigel Rees", "title": "Sayings of the Century", "price": 8.95, "size": [10, 20, 30, 40] }, { "category": "fiction", "author": 
"Evelyn Waugh", "title": "Sword of Honour", "price": 12.99, "size": [50, 60, 70, 80] }, { "category": "fiction", "author": "Herman Melville", "title": "Moby Dick", "isbn": "0-553-21311-3", "price": 8.99, "size": [5, 10, 20, 30] }, { "category": "fiction", "author": "J. R. R. Tolkien", "title": "The Lord of the Rings", "isbn": "0-395-19395-8", "price": 22.99, "size": [5, 6, 7, 8] } ], "bicycle": {"color": "red", "price": 19.95} } }` res, err := client.JSONSet(ctx, "doc1", "$", doc).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(Equal("OK")) resGet, err := client.JSONGet(ctx, "doc1", "$.store.book[?(@.price<10)].size").Result() Expect(err).NotTo(HaveOccurred()) Expect(resGet).To(Equal("[[10,20,30,40],[5,10,20,30]]")) resArr, err := client.JSONArrIndex(ctx, "doc1", "$.store.book[?(@.price<10)].size", 20).Result() Expect(err).NotTo(HaveOccurred()) Expect(resArr).To(Equal([]int64{1, 2})) }) It("should JSONArrInsert", Label("json.arrinsert", "json"), func() { cmd1 := client.JSONSet(ctx, "insert2", "$", `[100, 200, 300, 200]`) Expect(cmd1.Err()).NotTo(HaveOccurred()) Expect(cmd1.Val()).To(Equal("OK")) cmd2 := client.JSONArrInsert(ctx, "insert2", "$", -1, 1, 2) Expect(cmd2.Err()).NotTo(HaveOccurred()) Expect(cmd2.Val()).To(Equal([]int64{6})) cmd3 := client.JSONGet(ctx, "insert2") Expect(cmd3.Err()).NotTo(HaveOccurred()) // RESP2 vs RESP3 Expect(cmd3.Val()).To(Or( Equal(`[100,200,300,1,2,200]`), Equal(`[[100,200,300,1,2,200]]`))) }) It("should JSONArrLen", Label("json.arrlen", "json"), func() { cmd1 := client.JSONSet(ctx, "length2", "$", `{"a": [10], "b": {"a": [12, 10, 20, 12, 90, 10]}}`) Expect(cmd1.Err()).NotTo(HaveOccurred()) Expect(cmd1.Val()).To(Equal("OK")) cmd2 := client.JSONArrLen(ctx, "length2", "$..a") Expect(cmd2.Err()).NotTo(HaveOccurred()) Expect(cmd2.Val()).To(Equal([]int64{1, 6})) }) It("should JSONArrPop", Label("json.arrpop"), func() { cmd1 := client.JSONSet(ctx, "pop4", "$", `[100, 200, 300, 200]`) Expect(cmd1.Err()).NotTo(HaveOccurred()) Expect(cmd1.Val()).To(Equal("OK")) cmd2 := client.JSONArrPop(ctx, "pop4", "$", 2) Expect(cmd2.Err()).NotTo(HaveOccurred()) Expect(cmd2.Val()).To(Equal([]string{"300"})) cmd3 := client.JSONGet(ctx, "pop4", "$") Expect(cmd3.Err()).NotTo(HaveOccurred()) Expect(cmd3.Val()).To(Equal("[[100,200,200]]")) }) It("should JSONArrTrim", Label("json.arrtrim", "json"), func() { cmd1, err := client.JSONSet(ctx, "trim1", "$", `[0,1,2,3,4]`).Result() Expect(err).NotTo(HaveOccurred()) Expect(cmd1).To(Equal("OK")) stop := 3 cmd2, err := client.JSONArrTrimWithArgs(ctx, "trim1", "$", &redis.JSONArrTrimArgs{Start: 1, Stop: &stop}).Result() Expect(err).NotTo(HaveOccurred()) Expect(cmd2).To(Equal([]int64{3})) res, err := client.JSONGet(ctx, "trim1", "$").Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(Equal(`[[1,2,3]]`)) cmd3, err := client.JSONSet(ctx, "trim2", "$", `[0,1,2,3,4]`).Result() Expect(err).NotTo(HaveOccurred()) Expect(cmd3).To(Equal("OK")) stop = 3 cmd4, err := client.JSONArrTrimWithArgs(ctx, "trim2", "$", &redis.JSONArrTrimArgs{Start: -1, Stop: &stop}).Result() Expect(err).NotTo(HaveOccurred()) Expect(cmd4).To(Equal([]int64{0})) cmd5, err := client.JSONSet(ctx, "trim3", "$", `[0,1,2,3,4]`).Result() Expect(err).NotTo(HaveOccurred()) Expect(cmd5).To(Equal("OK")) stop = 99 cmd6, err := client.JSONArrTrimWithArgs(ctx, "trim3", "$", &redis.JSONArrTrimArgs{Start: 3, Stop: &stop}).Result() Expect(err).NotTo(HaveOccurred()) Expect(cmd6).To(Equal([]int64{2})) cmd7, err := client.JSONSet(ctx, "trim4", "$", `[0,1,2,3,4]`).Result() 
Expect(err).NotTo(HaveOccurred()) Expect(cmd7).To(Equal("OK")) stop = 1 cmd8, err := client.JSONArrTrimWithArgs(ctx, "trim4", "$", &redis.JSONArrTrimArgs{Start: 9, Stop: &stop}).Result() Expect(err).NotTo(HaveOccurred()) Expect(cmd8).To(Equal([]int64{0})) cmd9, err := client.JSONSet(ctx, "trim5", "$", `[0,1,2,3,4]`).Result() Expect(err).NotTo(HaveOccurred()) Expect(cmd9).To(Equal("OK")) stop = 11 cmd10, err := client.JSONArrTrimWithArgs(ctx, "trim5", "$", &redis.JSONArrTrimArgs{Start: 9, Stop: &stop}).Result() Expect(err).NotTo(HaveOccurred()) Expect(cmd10).To(Equal([]int64{0})) }) It("should JSONArrPop", Label("json.arrpop", "json"), func() { cmd1 := client.JSONSet(ctx, "pop4", "$", `[100, 200, 300, 200]`) Expect(cmd1.Err()).NotTo(HaveOccurred()) Expect(cmd1.Val()).To(Equal("OK")) cmd2 := client.JSONArrPop(ctx, "pop4", "$", 2) Expect(cmd2.Err()).NotTo(HaveOccurred()) Expect(cmd2.Val()).To(Equal([]string{"300"})) cmd3 := client.JSONGet(ctx, "pop4", "$") Expect(cmd3.Err()).NotTo(HaveOccurred()) Expect(cmd3.Val()).To(Equal("[[100,200,200]]")) }) }) Describe("get/set", Label("getset"), func() { It("should JSONSet", Label("json.set", "json"), func() { cmd := client.JSONSet(ctx, "set1", "$", `{"a": 1, "b": 2, "hello": "world"}`) Expect(cmd.Err()).NotTo(HaveOccurred()) Expect(cmd.Val()).To(Equal("OK")) }) It("should JSONGet", Label("json.get", "json", "NonRedisEnterprise"), func() { res, err := client.JSONSet(ctx, "get3", "$", `{"a": 1, "b": 2}`).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(Equal("OK")) res, err = client.JSONGetWithArgs(ctx, "get3", &redis.JSONGetArgs{Indent: "-"}).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(Equal(`{-"a":1,-"b":2}`)) res, err = client.JSONGetWithArgs(ctx, "get3", &redis.JSONGetArgs{Indent: "-", Newline: `~`, Space: `!`}).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(Equal(`{~-"a":!1,~-"b":!2~}`)) }) It("should JSONMerge", Label("json.merge", "json"), func() { res, err := client.JSONSet(ctx, "merge1", "$", `{"a": 1, "b": 2}`).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(Equal("OK")) res, err = client.JSONMerge(ctx, "merge1", "$", `{"b": 3, "c": 4}`).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(Equal("OK")) res, err = client.JSONGet(ctx, "merge1", "$").Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(Equal(`[{"a":1,"b":3,"c":4}]`)) }) It("should JSONMSet", Label("json.mset", "json", "NonRedisEnterprise"), func() { doc1 := redis.JSONSetArgs{Key: "mset1", Path: "$", Value: `{"a": 1}`} doc2 := redis.JSONSetArgs{Key: "mset2", Path: "$", Value: 2} docs := []redis.JSONSetArgs{doc1, doc2} mSetResult, err := client.JSONMSetArgs(ctx, docs).Result() Expect(err).NotTo(HaveOccurred()) Expect(mSetResult).To(Equal("OK")) res, err := client.JSONMGet(ctx, "$", "mset1").Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(Equal([]interface{}{`[{"a":1}]`})) res, err = client.JSONMGet(ctx, "$", "mset1", "mset2").Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(Equal([]interface{}{`[{"a":1}]`, "[2]"})) _, err = client.JSONMSet(ctx, "mset1", "$.a", 2, "mset3", "$", `[1]`).Result() Expect(err).NotTo(HaveOccurred()) }) It("should JSONMGet", Label("json.mget", "json", "NonRedisEnterprise"), func() { cmd1 := client.JSONSet(ctx, "mget2a", "$", `{"a": ["aa", "ab", "ac", "ad"], "b": {"a": ["ba", "bb", "bc", "bd"]}}`) Expect(cmd1.Err()).NotTo(HaveOccurred()) Expect(cmd1.Val()).To(Equal("OK")) cmd2 := client.JSONSet(ctx, "mget2b", "$", `{"a": [100, 200, 300, 200], "b": {"a": [100, 200, 300, 200]}}`) 
Expect(cmd2.Err()).NotTo(HaveOccurred()) Expect(cmd2.Val()).To(Equal("OK")) cmd3 := client.JSONMGet(ctx, "$..a", "mget2a", "mget2b") Expect(cmd3.Err()).NotTo(HaveOccurred()) Expect(cmd3.Val()).To(HaveLen(2)) Expect(cmd3.Val()[0]).To(Equal(`[["aa","ab","ac","ad"],["ba","bb","bc","bd"]]`)) Expect(cmd3.Val()[1]).To(Equal(`[[100,200,300,200],[100,200,300,200]]`)) }) It("should JSONMget with $", Label("json.mget", "json", "NonRedisEnterprise"), func() { res, err := client.JSONSet(ctx, "doc1", "$", `{"a": 1, "b": 2, "nested": {"a": 3}, "c": "", "nested2": {"a": ""}}`).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(Equal("OK")) res, err = client.JSONSet(ctx, "doc2", "$", `{"a": 4, "b": 5, "nested": {"a": 6}, "c": "", "nested2": {"a": [""]}}`).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(Equal("OK")) iRes, err := client.JSONMGet(ctx, "$..a", "doc1").Result() Expect(err).NotTo(HaveOccurred()) Expect(iRes).To(Equal([]interface{}{`[1,3,""]`})) iRes, err = client.JSONMGet(ctx, "$..a", "doc1", "doc2").Result() Expect(err).NotTo(HaveOccurred()) Expect(iRes).To(Equal([]interface{}{`[1,3,""]`, `[4,6,[""]]`})) iRes, err = client.JSONMGet(ctx, "$..a", "non_existing_doc", "non_existing_doc1").Result() Expect(err).NotTo(HaveOccurred()) Expect(iRes).To(Equal([]interface{}{nil, nil})) }) }) Describe("Misc", Label("misc"), func() { It("should JSONClear", Label("json.clear", "json"), func() { cmd1 := client.JSONSet(ctx, "clear1", "$", `[1]`) Expect(cmd1.Err()).NotTo(HaveOccurred()) Expect(cmd1.Val()).To(Equal("OK")) cmd2 := client.JSONClear(ctx, "clear1", "$") Expect(cmd2.Err()).NotTo(HaveOccurred()) Expect(cmd2.Val()).To(Equal(int64(1))) cmd3 := client.JSONGet(ctx, "clear1", "$") Expect(cmd3.Err()).NotTo(HaveOccurred()) Expect(cmd3.Val()).To(Equal(`[[]]`)) }) It("should JSONClear with $", Label("json.clear", "json"), func() { doc := `{ "nested1": {"a": {"foo": 10, "bar": 20}}, "a": ["foo"], "nested2": {"a": "claro"}, "nested3": {"a": {"baz": 50}} }` res, err := client.JSONSet(ctx, "doc1", "$", doc).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(Equal("OK")) iRes, err := client.JSONClear(ctx, "doc1", "$..a").Result() Expect(err).NotTo(HaveOccurred()) Expect(iRes).To(Equal(int64(3))) resGet, err := client.JSONGet(ctx, "doc1", `$`).Result() Expect(err).NotTo(HaveOccurred()) Expect(resGet).To(Equal(`[{"nested1":{"a":{}},"a":[],"nested2":{"a":"claro"},"nested3":{"a":{}}}]`)) res, err = client.JSONSet(ctx, "doc1", "$", doc).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(Equal("OK")) iRes, err = client.JSONClear(ctx, "doc1", "$.nested1.a").Result() Expect(err).NotTo(HaveOccurred()) Expect(iRes).To(Equal(int64(1))) resGet, err = client.JSONGet(ctx, "doc1", `$`).Result() Expect(err).NotTo(HaveOccurred()) Expect(resGet).To(Equal(`[{"nested1":{"a":{}},"a":["foo"],"nested2":{"a":"claro"},"nested3":{"a":{"baz":50}}}]`)) }) It("should JSONDel", Label("json.del", "json"), func() { cmd1 := client.JSONSet(ctx, "del1", "$", `[1]`) Expect(cmd1.Err()).NotTo(HaveOccurred()) Expect(cmd1.Val()).To(Equal("OK")) cmd2 := client.JSONDel(ctx, "del1", "$") Expect(cmd2.Err()).NotTo(HaveOccurred()) Expect(cmd2.Val()).To(Equal(int64(1))) cmd3 := client.JSONGet(ctx, "del1", "$") Expect(cmd3.Err()).NotTo(HaveOccurred()) Expect(cmd3.Val()).To(HaveLen(0)) }) It("should JSONDel with $", Label("json.del", "json"), func() { res, err := client.JSONSet(ctx, "del1", "$", `{"a": 1, "nested": {"a": 2, "b": 3}}`).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(Equal("OK")) iRes, err := 
client.JSONDel(ctx, "del1", "$..a").Result() Expect(err).NotTo(HaveOccurred()) Expect(iRes).To(Equal(int64(2))) resGet, err := client.JSONGet(ctx, "del1", "$").Result() Expect(err).NotTo(HaveOccurred()) Expect(resGet).To(Equal(`[{"nested":{"b":3}}]`)) res, err = client.JSONSet(ctx, "del2", "$", `{"a": {"a": 2, "b": 3}, "b": ["a", "b"], "nested": {"b": [true, "a", "b"]}}`).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(Equal("OK")) iRes, err = client.JSONDel(ctx, "del2", "$..a").Result() Expect(err).NotTo(HaveOccurred()) Expect(iRes).To(Equal(int64(1))) resGet, err = client.JSONGet(ctx, "del2", "$").Result() Expect(err).NotTo(HaveOccurred()) Expect(resGet).To(Equal(`[{"nested":{"b":[true,"a","b"]},"b":["a","b"]}]`)) doc := `[ { "ciao": ["non ancora"], "nested": [ {"ciao": [1, "a"]}, {"ciao": [2, "a"]}, {"ciaoc": [3, "non", "ciao"]}, {"ciao": [4, "a"]}, {"e": [5, "non", "ciao"]} ] } ]` res, err = client.JSONSet(ctx, "del3", "$", doc).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(Equal("OK")) iRes, err = client.JSONDel(ctx, "del3", `$.[0]["nested"]..ciao`).Result() Expect(err).NotTo(HaveOccurred()) Expect(iRes).To(Equal(int64(3))) resVal := `[[{"ciao":["non ancora"],"nested":[{},{},{"ciaoc":[3,"non","ciao"]},{},{"e":[5,"non","ciao"]}]}]]` resGet, err = client.JSONGet(ctx, "del3", "$").Result() Expect(err).NotTo(HaveOccurred()) Expect(resGet).To(Equal(resVal)) }) It("should JSONForget", Label("json.forget", "json"), func() { cmd1 := client.JSONSet(ctx, "forget3", "$", `{"a": [1,2,3], "b": {"a": [1,2,3], "b": "annie"}}`) Expect(cmd1.Err()).NotTo(HaveOccurred()) Expect(cmd1.Val()).To(Equal("OK")) cmd2 := client.JSONForget(ctx, "forget3", "$..a") Expect(cmd2.Err()).NotTo(HaveOccurred()) Expect(cmd2.Val()).To(Equal(int64(2))) cmd3 := client.JSONGet(ctx, "forget3", "$") Expect(cmd3.Err()).NotTo(HaveOccurred()) Expect(cmd3.Val()).To(Equal(`[{"b":{"b":"annie"}}]`)) }) It("should JSONForget with $", Label("json.forget", "json"), func() { res, err := client.JSONSet(ctx, "doc1", "$", `{"a": 1, "nested": {"a": 2, "b": 3}}`).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(Equal("OK")) iRes, err := client.JSONForget(ctx, "doc1", "$..a").Result() Expect(err).NotTo(HaveOccurred()) Expect(iRes).To(Equal(int64(2))) resGet, err := client.JSONGet(ctx, "doc1", "$").Result() Expect(err).NotTo(HaveOccurred()) Expect(resGet).To(Equal(`[{"nested":{"b":3}}]`)) res, err = client.JSONSet(ctx, "doc2", "$", `{"a": {"a": 2, "b": 3}, "b": ["a", "b"], "nested": {"b": [true, "a", "b"]}}`).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(Equal("OK")) iRes, err = client.JSONForget(ctx, "doc2", "$..a").Result() Expect(err).NotTo(HaveOccurred()) Expect(iRes).To(Equal(int64(1))) resGet, err = client.JSONGet(ctx, "doc2", "$").Result() Expect(err).NotTo(HaveOccurred()) Expect(resGet).To(Equal(`[{"nested":{"b":[true,"a","b"]},"b":["a","b"]}]`)) doc := `[ { "ciao": ["non ancora"], "nested": [ {"ciao": [1, "a"]}, {"ciao": [2, "a"]}, {"ciaoc": [3, "non", "ciao"]}, {"ciao": [4, "a"]}, {"e": [5, "non", "ciao"]} ] } ]` res, err = client.JSONSet(ctx, "doc3", "$", doc).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(Equal("OK")) iRes, err = client.JSONForget(ctx, "doc3", `$.[0]["nested"]..ciao`).Result() Expect(err).NotTo(HaveOccurred()) Expect(iRes).To(Equal(int64(3))) resVal := `[[{"ciao":["non ancora"],"nested":[{},{},{"ciaoc":[3,"non","ciao"]},{},{"e":[5,"non","ciao"]}]}]]` resGet, err = client.JSONGet(ctx, "doc3", "$").Result() Expect(err).NotTo(HaveOccurred()) 
Expect(resGet).To(Equal(resVal)) }) It("should JSONNumIncrBy", Label("json.numincrby", "json"), func() { cmd1 := client.JSONSet(ctx, "incr3", "$", `{"a": [1, 2], "b": {"a": [0, -1]}}`) Expect(cmd1.Err()).NotTo(HaveOccurred()) Expect(cmd1.Val()).To(Equal("OK")) cmd2 := client.JSONNumIncrBy(ctx, "incr3", "$..a[1]", float64(1)) Expect(cmd2.Err()).NotTo(HaveOccurred()) Expect(cmd2.Val()).To(Equal(`[3,0]`)) }) It("should JSONNumIncrBy with $", Label("json.numincrby", "json"), func() { res, err := client.JSONSet(ctx, "doc1", "$", `{"a": "b", "b": [{"a": 2}, {"a": 5.0}, {"a": "c"}]}`).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(Equal("OK")) res, err = client.JSONNumIncrBy(ctx, "doc1", "$.b[1].a", 2).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(Equal(`[7]`)) res, err = client.JSONNumIncrBy(ctx, "doc1", "$.b[1].a", 3.5).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(Equal(`[10.5]`)) res, err = client.JSONSet(ctx, "doc2", "$", `{"a": "b", "b": [{"a": 2}, {"a": 5.0}, {"a": "c"}]}`).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(Equal("OK")) res, err = client.JSONNumIncrBy(ctx, "doc2", "$.b[0].a", 3).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(Equal(`[5]`)) }) It("should JSONObjKeys", Label("json.objkeys", "json"), func() { cmd1 := client.JSONSet(ctx, "objkeys1", "$", `{"a": [1, 2], "b": {"a": [0, -1]}}`) Expect(cmd1.Err()).NotTo(HaveOccurred()) Expect(cmd1.Val()).To(Equal("OK")) cmd2 := client.JSONObjKeys(ctx, "objkeys1", "$..*") Expect(cmd2.Err()).NotTo(HaveOccurred()) Expect(cmd2.Val()).To(HaveLen(7)) Expect(cmd2.Val()).To(Equal([]interface{}{nil, []interface{}{"a"}, nil, nil, nil, nil, nil})) }) It("should JSONObjKeys with $", Label("json.objkeys", "json"), func() { doc := `{ "nested1": {"a": {"foo": 10, "bar": 20}}, "a": ["foo"], "nested2": {"a": {"baz": 50}} }` cmd1, err := client.JSONSet(ctx, "objkeys1", "$", doc).Result() Expect(err).NotTo(HaveOccurred()) Expect(cmd1).To(Equal("OK")) cmd2, err := client.JSONObjKeys(ctx, "objkeys1", "$.nested1.a").Result() Expect(err).NotTo(HaveOccurred()) Expect(cmd2).To(Equal([]interface{}{[]interface{}{"foo", "bar"}})) cmd2, err = client.JSONObjKeys(ctx, "objkeys1", ".*.a").Result() Expect(err).NotTo(HaveOccurred()) Expect(cmd2).To(Equal([]interface{}{"foo", "bar"})) cmd2, err = client.JSONObjKeys(ctx, "objkeys1", ".nested2.a").Result() Expect(err).NotTo(HaveOccurred()) Expect(cmd2).To(Equal([]interface{}{"baz"})) _, err = client.JSONObjKeys(ctx, "non_existing_doc", "..a").Result() Expect(err).To(HaveOccurred()) }) It("should JSONObjLen", Label("json.objlen", "json"), func() { cmd1 := client.JSONSet(ctx, "objlen2", "$", `{"a": [1, 2], "b": {"a": [0, -1]}}`) Expect(cmd1.Err()).NotTo(HaveOccurred()) Expect(cmd1.Val()).To(Equal("OK")) cmd2 := client.JSONObjLen(ctx, "objlen2", "$..*") Expect(cmd2.Err()).NotTo(HaveOccurred()) Expect(cmd2.Val()).To(HaveLen(7)) Expect(cmd2.Val()[0]).To(BeNil()) Expect(*cmd2.Val()[1]).To(Equal(int64(1))) }) It("should JSONStrLen", Label("json.strlen", "json"), func() { cmd1 := client.JSONSet(ctx, "strlen2", "$", `{"a": "alice", "b": "bob", "c": {"a": "alice", "b": "bob"}}`) Expect(cmd1.Err()).NotTo(HaveOccurred()) Expect(cmd1.Val()).To(Equal("OK")) cmd2 := client.JSONStrLen(ctx, "strlen2", "$..*") Expect(cmd2.Err()).NotTo(HaveOccurred()) Expect(cmd2.Val()).To(HaveLen(5)) var tmp int64 = 20 Expect(cmd2.Val()[0]).To(BeAssignableToTypeOf(&tmp)) Expect(*cmd2.Val()[0]).To(Equal(int64(5))) Expect(*cmd2.Val()[1]).To(Equal(int64(3))) Expect(cmd2.Val()[2]).To(BeNil()) 
Expect(*cmd2.Val()[3]).To(Equal(int64(5))) Expect(*cmd2.Val()[4]).To(Equal(int64(3))) }) It("should JSONStrAppend", Label("json.strappend", "json"), func() { cmd1, err := client.JSONSet(ctx, "strapp1", "$", `"foo"`).Result() Expect(err).NotTo(HaveOccurred()) Expect(cmd1).To(Equal("OK")) cmd2, err := client.JSONStrAppend(ctx, "strapp1", "$", `"bar"`).Result() Expect(err).NotTo(HaveOccurred()) Expect(*cmd2[0]).To(Equal(int64(6))) cmd3, err := client.JSONGet(ctx, "strapp1", "$").Result() Expect(err).NotTo(HaveOccurred()) Expect(cmd3).To(Equal(`["foobar"]`)) }) It("should JSONStrAppend and JSONStrLen with $", Label("json.strappend", "json.strlen", "json"), func() { res, err := client.JSONSet(ctx, "doc1", "$", `{"a": "foo", "nested1": {"a": "hello"}, "nested2": {"a": 31}}`).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(Equal("OK")) intArrayResult, err := client.JSONStrAppend(ctx, "doc1", "$.nested1.a", `"baz"`).Result() Expect(err).NotTo(HaveOccurred()) Expect(*intArrayResult[0]).To(Equal(int64(8))) res, err = client.JSONSet(ctx, "doc2", "$", `{"a": "foo", "nested1": {"a": "hello"}, "nested2": {"a": 31}}`).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(Equal("OK")) intResult, err := client.JSONStrLen(ctx, "doc2", "$.nested1.a").Result() Expect(err).NotTo(HaveOccurred()) Expect(*intResult[0]).To(Equal(int64(5))) }) It("should JSONToggle", Label("json.toggle", "json"), func() { cmd1 := client.JSONSet(ctx, "toggle1", "$", `[true]`) Expect(cmd1.Err()).NotTo(HaveOccurred()) Expect(cmd1.Val()).To(Equal("OK")) cmd2 := client.JSONToggle(ctx, "toggle1", "$[0]") Expect(cmd2.Err()).NotTo(HaveOccurred()) Expect(cmd2.Val()).To(HaveLen(1)) Expect(*cmd2.Val()[0]).To(Equal(int64(0))) }) It("should JSONType", Label("json.type", "json"), func() { cmd1 := client.JSONSet(ctx, "type1", "$", `[true]`) Expect(cmd1.Err()).NotTo(HaveOccurred()) Expect(cmd1.Val()).To(Equal("OK")) cmd2 := client.JSONType(ctx, "type1", "$[0]") Expect(cmd2.Err()).NotTo(HaveOccurred()) Expect(cmd2.Val()).To(HaveLen(1)) // RESP2 v RESP3 Expect(cmd2.Val()[0]).To(Or(Equal([]interface{}{"boolean"}), Equal("boolean"))) }) }) } }) var _ = Describe("Go-Redis Advanced JSON and RediSearch Tests", func() { var client *redis.Client var ctx = context.Background() setupRedisClient := func(protocolVersion int) *redis.Client { return redis.NewClient(&redis.Options{ Addr: "localhost:6379", DB: 0, Protocol: protocolVersion, // Setting RESP2 or RESP3 protocol UnstableResp3: true, // Enable RESP3 features }) } AfterEach(func() { if client != nil { client.FlushDB(ctx) client.Close() } }) Context("when testing with RESP2 and RESP3", func() { protocols := []int{2, 3} for _, protocol := range protocols { When("using protocol version", func() { BeforeEach(func() { client = setupRedisClient(protocol) }) It("should perform complex JSON and RediSearch operations", func() { jsonDoc := map[string]interface{}{ "person": map[string]interface{}{ "name": "Alice", "age": 30, "status": true, "address": map[string]interface{}{ "city": "Wonderland", "postcode": "12345", }, "contacts": []map[string]interface{}{ {"type": "email", "value": "alice@example.com"}, {"type": "phone", "value": "+123456789"}, {"type": "fax", "value": "+987654321"}, }, "friends": []map[string]interface{}{ {"name": "Bob", "age": 35, "status": true}, {"name": "Charlie", "age": 28, "status": false}, }, }, "settings": map[string]interface{}{ "notifications": map[string]interface{}{ "email": true, "sms": false, "alerts": []string{"low battery", "door open"}, }, "theme": "dark", }, } 
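// Usage sketch mirroring the setupRedisClient helper above: the same options are run once
// with RESP2 and once with RESP3, and UnstableResp3 opts in to RESP3-only reply shapes
// (the address is the local test server assumed by these tests):
var _ = func(protocolVersion int) *redis.Client {
	return redis.NewClient(&redis.Options{
		Addr:          "localhost:6379",
		DB:            0,
		Protocol:      protocolVersion, // 2 or 3
		UnstableResp3: true,
	})
}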
setCmd := client.JSONSet(ctx, "person:1", ".", jsonDoc) Expect(setCmd.Err()).NotTo(HaveOccurred(), "JSON.SET failed") getCmdRaw := client.JSONGet(ctx, "person:1", ".") rawJSON, err := getCmdRaw.Result() Expect(err).NotTo(HaveOccurred(), "JSON.GET (raw) failed") GinkgoWriter.Printf("Raw JSON: %s\n", rawJSON) getCmdExpanded := client.JSONGet(ctx, "person:1", ".") expandedJSON, err := getCmdExpanded.Expanded() Expect(err).NotTo(HaveOccurred(), "JSON.GET (expanded) failed") GinkgoWriter.Printf("Expanded JSON: %+v\n", expandedJSON) Expect(rawJSON).To(MatchJSON(jsonMustMarshal(expandedJSON))) arrAppendCmd := client.JSONArrAppend(ctx, "person:1", "$.person.contacts", `{"type": "social", "value": "@alice_wonder"}`) Expect(arrAppendCmd.Err()).NotTo(HaveOccurred(), "JSON.ARRAPPEND failed") arrLenCmd := client.JSONArrLen(ctx, "person:1", "$.person.contacts") arrLen, err := arrLenCmd.Result() Expect(err).NotTo(HaveOccurred(), "JSON.ARRLEN failed") Expect(arrLen).To(Equal([]int64{4}), "Array length mismatch after append") arrInsertCmd := client.JSONArrInsert(ctx, "person:1", "$.person.friends", 1, `{"name": "Diana", "age": 25, "status": true}`) Expect(arrInsertCmd.Err()).NotTo(HaveOccurred(), "JSON.ARRINSERT failed") start := 0 stop := 1 arrTrimCmd := client.JSONArrTrimWithArgs(ctx, "person:1", "$.person.friends", &redis.JSONArrTrimArgs{Start: start, Stop: &stop}) Expect(arrTrimCmd.Err()).NotTo(HaveOccurred(), "JSON.ARRTRIM failed") mergeData := map[string]interface{}{ "status": false, "nickname": "WonderAlice", "lastLogin": time.Now().Format(time.RFC3339), } mergeCmd := client.JSONMerge(ctx, "person:1", "$.person", jsonMustMarshal(mergeData)) Expect(mergeCmd.Err()).NotTo(HaveOccurred(), "JSON.MERGE failed") typeCmd := client.JSONType(ctx, "person:1", "$.person.nickname") nicknameType, err := typeCmd.Result() Expect(err).NotTo(HaveOccurred(), "JSON.TYPE failed") Expect(nicknameType[0]).To(Equal([]interface{}{"string"}), "JSON.TYPE mismatch for nickname") createIndexCmd := client.Do(ctx, "FT.CREATE", "person_idx", "ON", "JSON", "PREFIX", "1", "person:", "SCHEMA", "$.person.name", "AS", "name", "TEXT", "$.person.age", "AS", "age", "NUMERIC", "$.person.address.city", "AS", "city", "TEXT", "$.person.contacts[*].value", "AS", "contact_value", "TEXT", ) Expect(createIndexCmd.Err()).NotTo(HaveOccurred(), "FT.CREATE failed") searchCmd := client.FTSearchWithArgs(ctx, "person_idx", "@contact_value:(alice\\@example\\.com alice_wonder)", &redis.FTSearchOptions{Return: []redis.FTSearchReturn{{FieldName: "$.person.name"}, {FieldName: "$.person.age"}, {FieldName: "$.person.address.city"}}}) searchResult, err := searchCmd.Result() Expect(err).NotTo(HaveOccurred(), "FT.SEARCH failed") GinkgoWriter.Printf("Advanced Search result: %+v\n", searchResult) incrCmd := client.JSONNumIncrBy(ctx, "person:1", "$.person.age", 5) incrResult, err := incrCmd.Result() Expect(err).NotTo(HaveOccurred(), "JSON.NUMINCRBY failed") Expect(incrResult).To(Equal("[35]"), "Age increment mismatch") delCmd := client.JSONDel(ctx, "person:1", "$.settings.notifications.email") Expect(delCmd.Err()).NotTo(HaveOccurred(), "JSON.DEL failed") typeCmd = client.JSONType(ctx, "person:1", "$.settings.notifications.email") typeResult, err := typeCmd.Result() Expect(err).ToNot(HaveOccurred()) Expect(typeResult[0]).To(BeEmpty(), "Expected JSON.TYPE to be empty for deleted field") }) }) } }) }) // Helper function to marshal data into JSON for comparisons func jsonMustMarshal(v interface{}) string { bytes, err := json.Marshal(v) Expect(err).NotTo(HaveOccurred()) 
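// A minimal sketch of the searchable-JSON pattern used above: the index maps JSONPath
// expressions to field aliases, and RETURN limits which attributes come back. The index
// name matches the one created above; the query text is an example value:
var _ = func(ctx context.Context, client *redis.Client) {
	res, err := client.FTSearchWithArgs(ctx, "person_idx", "@city:Wonderland",
		&redis.FTSearchOptions{
			Return: []redis.FTSearchReturn{
				{FieldName: "$.person.name"},
				{FieldName: "$.person.address.city"},
			},
		}).Result()
	if err != nil {
		return
	}
	_ = res // an FTSearchResult: Total plus the matching Docs
}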
return string(bytes) } dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.10.0/universal.go0000644000000000000000000001730215024302467024257 0ustar rootrootpackage redis import ( "context" "crypto/tls" "net" "time" ) // UniversalOptions information is required by UniversalClient to establish // connections. type UniversalOptions struct { // Either a single address or a seed list of host:port addresses // of cluster/sentinel nodes. Addrs []string // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn. ClientName string // Database to be selected after connecting to the server. // Only single-node and failover clients. DB int // Common options. Dialer func(ctx context.Context, network, addr string) (net.Conn, error) OnConnect func(ctx context.Context, cn *Conn) error Protocol int Username string Password string SentinelUsername string SentinelPassword string MaxRetries int MinRetryBackoff time.Duration MaxRetryBackoff time.Duration DialTimeout time.Duration ReadTimeout time.Duration WriteTimeout time.Duration ContextTimeoutEnabled bool // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO). PoolFIFO bool PoolSize int PoolTimeout time.Duration MinIdleConns int MaxIdleConns int MaxActiveConns int ConnMaxIdleTime time.Duration ConnMaxLifetime time.Duration TLSConfig *tls.Config // Only cluster clients. MaxRedirects int ReadOnly bool RouteByLatency bool RouteRandomly bool // MasterName is the sentinel master name. // Only for failover clients. MasterName string // DisableIndentity - Disable set-lib on connect. // // default: false // // Deprecated: Use DisableIdentity instead. DisableIndentity bool // DisableIdentity is used to disable CLIENT SETINFO command on connect. // // default: false DisableIdentity bool IdentitySuffix string UnstableResp3 bool // IsClusterMode can be used when only one Addrs is provided (e.g. Elasticache supports setting up cluster mode with configuration endpoint). IsClusterMode bool } // Cluster returns cluster options created from the universal options. func (o *UniversalOptions) Cluster() *ClusterOptions { if len(o.Addrs) == 0 { o.Addrs = []string{"127.0.0.1:6379"} } return &ClusterOptions{ Addrs: o.Addrs, ClientName: o.ClientName, Dialer: o.Dialer, OnConnect: o.OnConnect, Protocol: o.Protocol, Username: o.Username, Password: o.Password, MaxRedirects: o.MaxRedirects, ReadOnly: o.ReadOnly, RouteByLatency: o.RouteByLatency, RouteRandomly: o.RouteRandomly, MaxRetries: o.MaxRetries, MinRetryBackoff: o.MinRetryBackoff, MaxRetryBackoff: o.MaxRetryBackoff, DialTimeout: o.DialTimeout, ReadTimeout: o.ReadTimeout, WriteTimeout: o.WriteTimeout, ContextTimeoutEnabled: o.ContextTimeoutEnabled, PoolFIFO: o.PoolFIFO, PoolSize: o.PoolSize, PoolTimeout: o.PoolTimeout, MinIdleConns: o.MinIdleConns, MaxIdleConns: o.MaxIdleConns, MaxActiveConns: o.MaxActiveConns, ConnMaxIdleTime: o.ConnMaxIdleTime, ConnMaxLifetime: o.ConnMaxLifetime, TLSConfig: o.TLSConfig, DisableIdentity: o.DisableIdentity, DisableIndentity: o.DisableIndentity, IdentitySuffix: o.IdentitySuffix, UnstableResp3: o.UnstableResp3, } } // Failover returns failover options created from the universal options. 
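// A minimal usage sketch for UniversalOptions (names are unqualified because this file is
// package redis; the addresses are example values). One options value can feed
// NewUniversalClient directly, or be translated into concrete options via Cluster(),
// Failover(), or Simple():
var _ = func() {
	opts := &UniversalOptions{
		Addrs: []string{"127.0.0.1:7000", "127.0.0.1:7001", "127.0.0.1:7002"},
	}
	clusterOpts := opts.Cluster() // the shared settings copied onto ClusterOptions
	_ = clusterOpts
	rdb := NewUniversalClient(opts) // several addresses and no MasterName -> cluster client
	defer rdb.Close()
}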
func (o *UniversalOptions) Failover() *FailoverOptions { if len(o.Addrs) == 0 { o.Addrs = []string{"127.0.0.1:26379"} } return &FailoverOptions{ SentinelAddrs: o.Addrs, MasterName: o.MasterName, ClientName: o.ClientName, Dialer: o.Dialer, OnConnect: o.OnConnect, DB: o.DB, Protocol: o.Protocol, Username: o.Username, Password: o.Password, SentinelUsername: o.SentinelUsername, SentinelPassword: o.SentinelPassword, RouteByLatency: o.RouteByLatency, RouteRandomly: o.RouteRandomly, MaxRetries: o.MaxRetries, MinRetryBackoff: o.MinRetryBackoff, MaxRetryBackoff: o.MaxRetryBackoff, DialTimeout: o.DialTimeout, ReadTimeout: o.ReadTimeout, WriteTimeout: o.WriteTimeout, ContextTimeoutEnabled: o.ContextTimeoutEnabled, PoolFIFO: o.PoolFIFO, PoolSize: o.PoolSize, PoolTimeout: o.PoolTimeout, MinIdleConns: o.MinIdleConns, MaxIdleConns: o.MaxIdleConns, MaxActiveConns: o.MaxActiveConns, ConnMaxIdleTime: o.ConnMaxIdleTime, ConnMaxLifetime: o.ConnMaxLifetime, TLSConfig: o.TLSConfig, ReplicaOnly: o.ReadOnly, DisableIdentity: o.DisableIdentity, DisableIndentity: o.DisableIndentity, IdentitySuffix: o.IdentitySuffix, UnstableResp3: o.UnstableResp3, } } // Simple returns basic options created from the universal options. func (o *UniversalOptions) Simple() *Options { addr := "127.0.0.1:6379" if len(o.Addrs) > 0 { addr = o.Addrs[0] } return &Options{ Addr: addr, ClientName: o.ClientName, Dialer: o.Dialer, OnConnect: o.OnConnect, DB: o.DB, Protocol: o.Protocol, Username: o.Username, Password: o.Password, MaxRetries: o.MaxRetries, MinRetryBackoff: o.MinRetryBackoff, MaxRetryBackoff: o.MaxRetryBackoff, DialTimeout: o.DialTimeout, ReadTimeout: o.ReadTimeout, WriteTimeout: o.WriteTimeout, ContextTimeoutEnabled: o.ContextTimeoutEnabled, PoolFIFO: o.PoolFIFO, PoolSize: o.PoolSize, PoolTimeout: o.PoolTimeout, MinIdleConns: o.MinIdleConns, MaxIdleConns: o.MaxIdleConns, MaxActiveConns: o.MaxActiveConns, ConnMaxIdleTime: o.ConnMaxIdleTime, ConnMaxLifetime: o.ConnMaxLifetime, TLSConfig: o.TLSConfig, DisableIdentity: o.DisableIdentity, DisableIndentity: o.DisableIndentity, IdentitySuffix: o.IdentitySuffix, UnstableResp3: o.UnstableResp3, } } // -------------------------------------------------------------------- // UniversalClient is an abstract client which - based on the provided options - // represents either a ClusterClient, a FailoverClient, or a single-node Client. // This can be useful for testing cluster-specific applications locally or having different // clients in different environments. type UniversalClient interface { Cmdable AddHook(Hook) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error Do(ctx context.Context, args ...interface{}) *Cmd Process(ctx context.Context, cmd Cmder) error Subscribe(ctx context.Context, channels ...string) *PubSub PSubscribe(ctx context.Context, channels ...string) *PubSub SSubscribe(ctx context.Context, channels ...string) *PubSub Close() error PoolStats() *PoolStats } var ( _ UniversalClient = (*Client)(nil) _ UniversalClient = (*ClusterClient)(nil) _ UniversalClient = (*Ring)(nil) ) // NewUniversalClient returns a new multi client. The type of the returned client depends // on the following conditions: // // 1. If the MasterName option is specified with RouteByLatency, RouteRandomly or IsClusterMode, // a FailoverClusterClient is returned. // 2. If the MasterName option is specified without RouteByLatency, RouteRandomly or IsClusterMode, // a sentinel-backed FailoverClient is returned. // 3. 
If the number of Addrs is two or more, or IsClusterMode option is specified, // a ClusterClient is returned. // 4. Otherwise, a single-node Client is returned. func NewUniversalClient(opts *UniversalOptions) UniversalClient { if opts == nil { panic("redis: NewUniversalClient nil options") } switch { case opts.MasterName != "" && (opts.RouteByLatency || opts.RouteRandomly || opts.IsClusterMode): return NewFailoverClusterClient(opts.Failover()) case opts.MasterName != "": return NewFailoverClient(opts.Failover()) case len(opts.Addrs) > 1 || opts.IsClusterMode: return NewClusterClient(opts.Cluster()) default: return NewClient(opts.Simple()) } } dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.10.0/acl_commands.go0000644000000000000000000000453115024302467024667 0ustar rootrootpackage redis import "context" type ACLCmdable interface { ACLDryRun(ctx context.Context, username string, command ...interface{}) *StringCmd ACLLog(ctx context.Context, count int64) *ACLLogCmd ACLLogReset(ctx context.Context) *StatusCmd ACLSetUser(ctx context.Context, username string, rules ...string) *StatusCmd ACLDelUser(ctx context.Context, username string) *IntCmd ACLList(ctx context.Context) *StringSliceCmd ACLCat(ctx context.Context) *StringSliceCmd ACLCatArgs(ctx context.Context, options *ACLCatArgs) *StringSliceCmd } type ACLCatArgs struct { Category string } func (c cmdable) ACLDryRun(ctx context.Context, username string, command ...interface{}) *StringCmd { args := make([]interface{}, 0, 3+len(command)) args = append(args, "acl", "dryrun", username) args = append(args, command...) cmd := NewStringCmd(ctx, args...) _ = c(ctx, cmd) return cmd } func (c cmdable) ACLLog(ctx context.Context, count int64) *ACLLogCmd { args := make([]interface{}, 0, 3) args = append(args, "acl", "log") if count > 0 { args = append(args, count) } cmd := NewACLLogCmd(ctx, args...) _ = c(ctx, cmd) return cmd } func (c cmdable) ACLLogReset(ctx context.Context) *StatusCmd { cmd := NewStatusCmd(ctx, "acl", "log", "reset") _ = c(ctx, cmd) return cmd } func (c cmdable) ACLDelUser(ctx context.Context, username string) *IntCmd { cmd := NewIntCmd(ctx, "acl", "deluser", username) _ = c(ctx, cmd) return cmd } func (c cmdable) ACLSetUser(ctx context.Context, username string, rules ...string) *StatusCmd { args := make([]interface{}, 3+len(rules)) args[0] = "acl" args[1] = "setuser" args[2] = username for i, rule := range rules { args[i+3] = rule } cmd := NewStatusCmd(ctx, args...) 
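// A minimal sketch of the ACL helpers in this file (unqualified names because this is
// package redis; the user name, password, and rules are example values):
var _ = func(ctx context.Context, rdb *Client) {
	// Create or update a user with example rules, then inspect and remove it.
	if err := rdb.ACLSetUser(ctx, "example-user", "on", ">example-pass", "~app:*", "+@read").Err(); err != nil {
		return
	}
	users, _ := rdb.ACLList(ctx).Result()                                 // one entry per configured user
	readCmds, _ := rdb.ACLCatArgs(ctx, &ACLCatArgs{Category: "read"}).Result() // commands in the "read" category
	_, _ = users, readCmds
	_ = rdb.ACLDelUser(ctx, "example-user")
}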
_ = c(ctx, cmd) return cmd } func (c cmdable) ACLList(ctx context.Context) *StringSliceCmd { cmd := NewStringSliceCmd(ctx, "acl", "list") _ = c(ctx, cmd) return cmd } func (c cmdable) ACLCat(ctx context.Context) *StringSliceCmd { cmd := NewStringSliceCmd(ctx, "acl", "cat") _ = c(ctx, cmd) return cmd } func (c cmdable) ACLCatArgs(ctx context.Context, options *ACLCatArgs) *StringSliceCmd { // if there is a category passed, build new cmd, if there isn't - use the ACLCat method if options != nil && options.Category != "" { cmd := NewStringSliceCmd(ctx, "acl", "cat", options.Category) _ = c(ctx, cmd) return cmd } return c.ACLCat(ctx) } dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.10.0/export_test.go0000644000000000000000000000446215024302467024632 0ustar rootrootpackage redis import ( "context" "fmt" "net" "strings" "github.com/redis/go-redis/v9/internal" "github.com/redis/go-redis/v9/internal/hashtag" "github.com/redis/go-redis/v9/internal/pool" ) func (c *baseClient) Pool() pool.Pooler { return c.connPool } func (c *PubSub) SetNetConn(netConn net.Conn) { c.cn = pool.NewConn(netConn) } func (c *ClusterClient) LoadState(ctx context.Context) (*clusterState, error) { // return c.state.Reload(ctx) return c.loadState(ctx) } func (c *ClusterClient) SlotAddrs(ctx context.Context, slot int) []string { state, err := c.state.Get(ctx) if err != nil { panic(err) } var addrs []string for _, n := range state.slotNodes(slot) { addrs = append(addrs, n.Client.getAddr()) } return addrs } func (c *ClusterClient) Nodes(ctx context.Context, key string) ([]*clusterNode, error) { state, err := c.state.Reload(ctx) if err != nil { return nil, err } slot := hashtag.Slot(key) nodes := state.slotNodes(slot) if len(nodes) != 2 { return nil, fmt.Errorf("slot=%d does not have enough nodes: %v", slot, nodes) } return nodes, nil } func (c *ClusterClient) SwapNodes(ctx context.Context, key string) error { nodes, err := c.Nodes(ctx, key) if err != nil { return err } nodes[0], nodes[1] = nodes[1], nodes[0] return nil } func (c *clusterState) IsConsistent(ctx context.Context) bool { if len(c.Masters) < 3 { return false } for _, master := range c.Masters { s := master.Client.Info(ctx, "replication").Val() if !strings.Contains(s, "role:master") { return false } } if len(c.Slaves) < 3 { return false } for _, slave := range c.Slaves { s := slave.Client.Info(ctx, "replication").Val() if !strings.Contains(s, "role:slave") { return false } } return true } func GetSlavesAddrByName(ctx context.Context, c *SentinelClient, name string) []string { addrs, err := c.Replicas(ctx, name).Result() if err != nil { internal.Logger.Printf(ctx, "sentinel: Replicas name=%q failed: %s", name, err) return []string{} } return parseReplicaAddrs(addrs, false) } func (c *Ring) ShardByName(name string) *ringShard { shard, _ := c.sharding.GetByName(name) return shard } func (c *ModuleLoadexConfig) ToArgs() []interface{} { return c.toArgs() } func ShouldRetry(err error, retryTimeout bool) bool { return shouldRetry(err, retryTimeout) } dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.10.0/list_commands.go0000644000000000000000000002140315024302467025100 0ustar rootrootpackage redis import ( "context" "strings" "time" ) type ListCmdable interface { BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd BLMPop(ctx context.Context, timeout time.Duration, direction string, count int64, keys ...string) *KeyValuesCmd BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd BRPopLPush(ctx context.Context, 
source, destination string, timeout time.Duration) *StringCmd LIndex(ctx context.Context, key string, index int64) *StringCmd LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd LLen(ctx context.Context, key string) *IntCmd LMPop(ctx context.Context, direction string, count int64, keys ...string) *KeyValuesCmd LPop(ctx context.Context, key string) *StringCmd LPopCount(ctx context.Context, key string, count int) *StringSliceCmd LPos(ctx context.Context, key string, value string, args LPosArgs) *IntCmd LPosCount(ctx context.Context, key string, value string, count int64, args LPosArgs) *IntSliceCmd LPush(ctx context.Context, key string, values ...interface{}) *IntCmd LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd RPop(ctx context.Context, key string) *StringCmd RPopCount(ctx context.Context, key string, count int) *StringSliceCmd RPopLPush(ctx context.Context, source, destination string) *StringCmd RPush(ctx context.Context, key string, values ...interface{}) *IntCmd RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd BLMove(ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration) *StringCmd } func (c cmdable) BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd { args := make([]interface{}, 1+len(keys)+1) args[0] = "blpop" for i, key := range keys { args[1+i] = key } args[len(args)-1] = formatSec(ctx, timeout) cmd := NewStringSliceCmd(ctx, args...) cmd.setReadTimeout(timeout) _ = c(ctx, cmd) return cmd } func (c cmdable) BLMPop(ctx context.Context, timeout time.Duration, direction string, count int64, keys ...string) *KeyValuesCmd { args := make([]interface{}, 3+len(keys), 6+len(keys)) args[0] = "blmpop" args[1] = formatSec(ctx, timeout) args[2] = len(keys) for i, key := range keys { args[3+i] = key } args = append(args, strings.ToLower(direction), "count", count) cmd := NewKeyValuesCmd(ctx, args...) cmd.setReadTimeout(timeout) _ = c(ctx, cmd) return cmd } func (c cmdable) BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd { args := make([]interface{}, 1+len(keys)+1) args[0] = "brpop" for i, key := range keys { args[1+i] = key } args[len(keys)+1] = formatSec(ctx, timeout) cmd := NewStringSliceCmd(ctx, args...) cmd.setReadTimeout(timeout) _ = c(ctx, cmd) return cmd } func (c cmdable) BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd { cmd := NewStringCmd( ctx, "brpoplpush", source, destination, formatSec(ctx, timeout), ) cmd.setReadTimeout(timeout) _ = c(ctx, cmd) return cmd } func (c cmdable) LIndex(ctx context.Context, key string, index int64) *StringCmd { cmd := NewStringCmd(ctx, "lindex", key, index) _ = c(ctx, cmd) return cmd } // LMPop Pops one or more elements from the first non-empty list key from the list of provided key names. 
// direction: left or right, count: > 0 // example: client.LMPop(ctx, "left", 3, "key1", "key2") func (c cmdable) LMPop(ctx context.Context, direction string, count int64, keys ...string) *KeyValuesCmd { args := make([]interface{}, 2+len(keys), 5+len(keys)) args[0] = "lmpop" args[1] = len(keys) for i, key := range keys { args[2+i] = key } args = append(args, strings.ToLower(direction), "count", count) cmd := NewKeyValuesCmd(ctx, args...) _ = c(ctx, cmd) return cmd } func (c cmdable) LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd { cmd := NewIntCmd(ctx, "linsert", key, op, pivot, value) _ = c(ctx, cmd) return cmd } func (c cmdable) LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd { cmd := NewIntCmd(ctx, "linsert", key, "before", pivot, value) _ = c(ctx, cmd) return cmd } func (c cmdable) LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd { cmd := NewIntCmd(ctx, "linsert", key, "after", pivot, value) _ = c(ctx, cmd) return cmd } func (c cmdable) LLen(ctx context.Context, key string) *IntCmd { cmd := NewIntCmd(ctx, "llen", key) _ = c(ctx, cmd) return cmd } func (c cmdable) LPop(ctx context.Context, key string) *StringCmd { cmd := NewStringCmd(ctx, "lpop", key) _ = c(ctx, cmd) return cmd } func (c cmdable) LPopCount(ctx context.Context, key string, count int) *StringSliceCmd { cmd := NewStringSliceCmd(ctx, "lpop", key, count) _ = c(ctx, cmd) return cmd } type LPosArgs struct { Rank, MaxLen int64 } func (c cmdable) LPos(ctx context.Context, key string, value string, a LPosArgs) *IntCmd { args := []interface{}{"lpos", key, value} if a.Rank != 0 { args = append(args, "rank", a.Rank) } if a.MaxLen != 0 { args = append(args, "maxlen", a.MaxLen) } cmd := NewIntCmd(ctx, args...) _ = c(ctx, cmd) return cmd } func (c cmdable) LPosCount(ctx context.Context, key string, value string, count int64, a LPosArgs) *IntSliceCmd { args := []interface{}{"lpos", key, value, "count", count} if a.Rank != 0 { args = append(args, "rank", a.Rank) } if a.MaxLen != 0 { args = append(args, "maxlen", a.MaxLen) } cmd := NewIntSliceCmd(ctx, args...) _ = c(ctx, cmd) return cmd } func (c cmdable) LPush(ctx context.Context, key string, values ...interface{}) *IntCmd { args := make([]interface{}, 2, 2+len(values)) args[0] = "lpush" args[1] = key args = appendArgs(args, values) cmd := NewIntCmd(ctx, args...) _ = c(ctx, cmd) return cmd } func (c cmdable) LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd { args := make([]interface{}, 2, 2+len(values)) args[0] = "lpushx" args[1] = key args = appendArgs(args, values) cmd := NewIntCmd(ctx, args...) 
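// A minimal sketch tying the list helpers in this file together (unqualified names because
// this is package redis; the key names are example values):
var _ = func(ctx context.Context, rdb *Client) {
	_ = rdb.RPush(ctx, "list:a", "x", "y", "z")          // append to the tail
	vals, _ := rdb.LRange(ctx, "list:a", 0, -1).Result() // ["x", "y", "z"]
	_ = vals
	// Pop up to 3 elements from the left end of the first non-empty key, as in the
	// LMPop doc comment above: client.LMPop(ctx, "left", 3, "key1", "key2").
	popped := rdb.LMPop(ctx, "left", 3, "list:a", "list:b")
	_ = popped.Err()
}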
_ = c(ctx, cmd) return cmd } func (c cmdable) LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd { cmd := NewStringSliceCmd( ctx, "lrange", key, start, stop, ) _ = c(ctx, cmd) return cmd } func (c cmdable) LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd { cmd := NewIntCmd(ctx, "lrem", key, count, value) _ = c(ctx, cmd) return cmd } func (c cmdable) LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd { cmd := NewStatusCmd(ctx, "lset", key, index, value) _ = c(ctx, cmd) return cmd } func (c cmdable) LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd { cmd := NewStatusCmd( ctx, "ltrim", key, start, stop, ) _ = c(ctx, cmd) return cmd } func (c cmdable) RPop(ctx context.Context, key string) *StringCmd { cmd := NewStringCmd(ctx, "rpop", key) _ = c(ctx, cmd) return cmd } func (c cmdable) RPopCount(ctx context.Context, key string, count int) *StringSliceCmd { cmd := NewStringSliceCmd(ctx, "rpop", key, count) _ = c(ctx, cmd) return cmd } func (c cmdable) RPopLPush(ctx context.Context, source, destination string) *StringCmd { cmd := NewStringCmd(ctx, "rpoplpush", source, destination) _ = c(ctx, cmd) return cmd } func (c cmdable) RPush(ctx context.Context, key string, values ...interface{}) *IntCmd { args := make([]interface{}, 2, 2+len(values)) args[0] = "rpush" args[1] = key args = appendArgs(args, values) cmd := NewIntCmd(ctx, args...) _ = c(ctx, cmd) return cmd } func (c cmdable) RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd { args := make([]interface{}, 2, 2+len(values)) args[0] = "rpushx" args[1] = key args = appendArgs(args, values) cmd := NewIntCmd(ctx, args...) _ = c(ctx, cmd) return cmd } func (c cmdable) LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd { cmd := NewStringCmd(ctx, "lmove", source, destination, srcpos, destpos) _ = c(ctx, cmd) return cmd } func (c cmdable) BLMove( ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration, ) *StringCmd { cmd := NewStringCmd(ctx, "blmove", source, destination, srcpos, destpos, formatSec(ctx, timeout)) cmd.setReadTimeout(timeout) _ = c(ctx, cmd) return cmd } dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.10.0/search_commands.go0000644000000000000000000017604415024302467025406 0ustar rootrootpackage redis import ( "context" "fmt" "strconv" "github.com/redis/go-redis/v9/internal" "github.com/redis/go-redis/v9/internal/proto" ) type SearchCmdable interface { FT_List(ctx context.Context) *StringSliceCmd FTAggregate(ctx context.Context, index string, query string) *MapStringInterfaceCmd FTAggregateWithArgs(ctx context.Context, index string, query string, options *FTAggregateOptions) *AggregateCmd FTAliasAdd(ctx context.Context, index string, alias string) *StatusCmd FTAliasDel(ctx context.Context, alias string) *StatusCmd FTAliasUpdate(ctx context.Context, index string, alias string) *StatusCmd FTAlter(ctx context.Context, index string, skipInitialScan bool, definition []interface{}) *StatusCmd FTConfigGet(ctx context.Context, option string) *MapMapStringInterfaceCmd FTConfigSet(ctx context.Context, option string, value interface{}) *StatusCmd FTCreate(ctx context.Context, index string, options *FTCreateOptions, schema ...*FieldSchema) *StatusCmd FTCursorDel(ctx context.Context, index string, cursorId int) *StatusCmd FTCursorRead(ctx context.Context, index string, cursorId int, count int) *MapStringInterfaceCmd FTDictAdd(ctx context.Context, dict string, 
term ...interface{}) *IntCmd FTDictDel(ctx context.Context, dict string, term ...interface{}) *IntCmd FTDictDump(ctx context.Context, dict string) *StringSliceCmd FTDropIndex(ctx context.Context, index string) *StatusCmd FTDropIndexWithArgs(ctx context.Context, index string, options *FTDropIndexOptions) *StatusCmd FTExplain(ctx context.Context, index string, query string) *StringCmd FTExplainWithArgs(ctx context.Context, index string, query string, options *FTExplainOptions) *StringCmd FTInfo(ctx context.Context, index string) *FTInfoCmd FTSpellCheck(ctx context.Context, index string, query string) *FTSpellCheckCmd FTSpellCheckWithArgs(ctx context.Context, index string, query string, options *FTSpellCheckOptions) *FTSpellCheckCmd FTSearch(ctx context.Context, index string, query string) *FTSearchCmd FTSearchWithArgs(ctx context.Context, index string, query string, options *FTSearchOptions) *FTSearchCmd FTSynDump(ctx context.Context, index string) *FTSynDumpCmd FTSynUpdate(ctx context.Context, index string, synGroupId interface{}, terms []interface{}) *StatusCmd FTSynUpdateWithArgs(ctx context.Context, index string, synGroupId interface{}, options *FTSynUpdateOptions, terms []interface{}) *StatusCmd FTTagVals(ctx context.Context, index string, field string) *StringSliceCmd } type FTCreateOptions struct { OnHash bool OnJSON bool Prefix []interface{} Filter string DefaultLanguage string LanguageField string Score float64 ScoreField string PayloadField string MaxTextFields int NoOffsets bool Temporary int NoHL bool NoFields bool NoFreqs bool StopWords []interface{} SkipInitialScan bool } type FieldSchema struct { FieldName string As string FieldType SearchFieldType Sortable bool UNF bool NoStem bool NoIndex bool PhoneticMatcher string Weight float64 Separator string CaseSensitive bool WithSuffixtrie bool VectorArgs *FTVectorArgs GeoShapeFieldType string IndexEmpty bool IndexMissing bool } type FTVectorArgs struct { FlatOptions *FTFlatOptions HNSWOptions *FTHNSWOptions } type FTFlatOptions struct { Type string Dim int DistanceMetric string InitialCapacity int BlockSize int } type FTHNSWOptions struct { Type string Dim int DistanceMetric string InitialCapacity int MaxEdgesPerNode int MaxAllowedEdgesPerNode int EFRunTime int Epsilon float64 } type FTDropIndexOptions struct { DeleteDocs bool } type SpellCheckTerms struct { Include bool Exclude bool Dictionary string } type FTExplainOptions struct { // Dialect 1,3 and 4 are deprecated since redis 8.0 Dialect string } type FTSynUpdateOptions struct { SkipInitialScan bool } type SearchAggregator int const ( SearchInvalid = SearchAggregator(iota) SearchAvg SearchSum SearchMin SearchMax SearchCount SearchCountDistinct SearchCountDistinctish SearchStdDev SearchQuantile SearchToList SearchFirstValue SearchRandomSample ) func (a SearchAggregator) String() string { switch a { case SearchInvalid: return "" case SearchAvg: return "AVG" case SearchSum: return "SUM" case SearchMin: return "MIN" case SearchMax: return "MAX" case SearchCount: return "COUNT" case SearchCountDistinct: return "COUNT_DISTINCT" case SearchCountDistinctish: return "COUNT_DISTINCTISH" case SearchStdDev: return "STDDEV" case SearchQuantile: return "QUANTILE" case SearchToList: return "TOLIST" case SearchFirstValue: return "FIRST_VALUE" case SearchRandomSample: return "RANDOM_SAMPLE" default: return "" } } type SearchFieldType int const ( SearchFieldTypeInvalid = SearchFieldType(iota) SearchFieldTypeNumeric SearchFieldTypeTag SearchFieldTypeText SearchFieldTypeGeo SearchFieldTypeVector 
SearchFieldTypeGeoShape ) func (t SearchFieldType) String() string { switch t { case SearchFieldTypeInvalid: return "" case SearchFieldTypeNumeric: return "NUMERIC" case SearchFieldTypeTag: return "TAG" case SearchFieldTypeText: return "TEXT" case SearchFieldTypeGeo: return "GEO" case SearchFieldTypeVector: return "VECTOR" case SearchFieldTypeGeoShape: return "GEOSHAPE" default: return "TEXT" } } // Each AggregateReducer have different args. // Please follow https://redis.io/docs/interact/search-and-query/search/aggregations/#supported-groupby-reducers for more information. type FTAggregateReducer struct { Reducer SearchAggregator Args []interface{} As string } type FTAggregateGroupBy struct { Fields []interface{} Reduce []FTAggregateReducer } type FTAggregateSortBy struct { FieldName string Asc bool Desc bool } type FTAggregateApply struct { Field string As string } type FTAggregateLoad struct { Field string As string } type FTAggregateWithCursor struct { Count int MaxIdle int } type FTAggregateOptions struct { Verbatim bool LoadAll bool Load []FTAggregateLoad Timeout int GroupBy []FTAggregateGroupBy SortBy []FTAggregateSortBy SortByMax int // Scorer is used to set scoring function, if not set passed, a default will be used. // The default scorer depends on the Redis version: // - `BM25` for Redis >= 8 // - `TFIDF` for Redis < 8 Scorer string // AddScores is available in Redis CE 8 AddScores bool Apply []FTAggregateApply LimitOffset int Limit int Filter string WithCursor bool WithCursorOptions *FTAggregateWithCursor Params map[string]interface{} // Dialect 1,3 and 4 are deprecated since redis 8.0 DialectVersion int } type FTSearchFilter struct { FieldName interface{} Min interface{} Max interface{} } type FTSearchGeoFilter struct { FieldName string Longitude float64 Latitude float64 Radius float64 Unit string } type FTSearchReturn struct { FieldName string As string } type FTSearchSortBy struct { FieldName string Asc bool Desc bool } // FTSearchOptions hold options that can be passed to the FT.SEARCH command. // More information about the options can be found // in the documentation for FT.SEARCH https://redis.io/docs/latest/commands/ft.search/ type FTSearchOptions struct { NoContent bool Verbatim bool NoStopWords bool WithScores bool WithPayloads bool WithSortKeys bool Filters []FTSearchFilter GeoFilter []FTSearchGeoFilter InKeys []interface{} InFields []interface{} Return []FTSearchReturn Slop int Timeout int InOrder bool Language string Expander string // Scorer is used to set scoring function, if not set passed, a default will be used. // The default scorer depends on the Redis version: // - `BM25` for Redis >= 8 // - `TFIDF` for Redis < 8 Scorer string ExplainScore bool Payload string SortBy []FTSearchSortBy SortByWithCount bool LimitOffset int Limit int // CountOnly sets LIMIT 0 0 to get the count - number of documents in the result set without actually returning the result set. // When using this option, the Limit and LimitOffset options are ignored. 
CountOnly bool Params map[string]interface{} // Dialect 1,3 and 4 are deprecated since redis 8.0 DialectVersion int } type FTSynDumpResult struct { Term string Synonyms []string } type FTSynDumpCmd struct { baseCmd val []FTSynDumpResult } type FTAggregateResult struct { Total int Rows []AggregateRow } type AggregateRow struct { Fields map[string]interface{} } type AggregateCmd struct { baseCmd val *FTAggregateResult } type FTInfoResult struct { IndexErrors IndexErrors Attributes []FTAttribute BytesPerRecordAvg string Cleaning int CursorStats CursorStats DialectStats map[string]int DocTableSizeMB float64 FieldStatistics []FieldStatistic GCStats GCStats GeoshapesSzMB float64 HashIndexingFailures int IndexDefinition IndexDefinition IndexName string IndexOptions []string Indexing int InvertedSzMB float64 KeyTableSizeMB float64 MaxDocID int NumDocs int NumRecords int NumTerms int NumberOfUses int OffsetBitsPerRecordAvg string OffsetVectorsSzMB float64 OffsetsPerTermAvg string PercentIndexed float64 RecordsPerDocAvg string SortableValuesSizeMB float64 TagOverheadSzMB float64 TextOverheadSzMB float64 TotalIndexMemorySzMB float64 TotalIndexingTime int TotalInvertedIndexBlocks int VectorIndexSzMB float64 } type IndexErrors struct { IndexingFailures int LastIndexingError string LastIndexingErrorKey string } type FTAttribute struct { Identifier string Attribute string Type string Weight float64 Sortable bool NoStem bool NoIndex bool UNF bool PhoneticMatcher string CaseSensitive bool WithSuffixtrie bool } type CursorStats struct { GlobalIdle int GlobalTotal int IndexCapacity int IndexTotal int } type FieldStatistic struct { Identifier string Attribute string IndexErrors IndexErrors } type GCStats struct { BytesCollected int TotalMsRun int TotalCycles int AverageCycleTimeMs string LastRunTimeMs int GCNumericTreesMissed int GCBlocksDenied int } type IndexDefinition struct { KeyType string Prefixes []string DefaultScore float64 } type FTSpellCheckOptions struct { Distance int Terms *FTSpellCheckTerms // Dialect 1,3 and 4 are deprecated since redis 8.0 Dialect int } type FTSpellCheckTerms struct { Inclusion string // Either "INCLUDE" or "EXCLUDE" Dictionary string Terms []interface{} } type SpellCheckResult struct { Term string Suggestions []SpellCheckSuggestion } type SpellCheckSuggestion struct { Score float64 Suggestion string } type FTSearchResult struct { Total int Docs []Document } type Document struct { ID string Score *float64 Payload *string SortKey *string Fields map[string]string } type AggregateQuery []interface{} // FT_List - Lists all the existing indexes in the database. // For more information, please refer to the Redis documentation: // [FT._LIST]: (https://redis.io/commands/ft._list/) func (c cmdable) FT_List(ctx context.Context) *StringSliceCmd { cmd := NewStringSliceCmd(ctx, "FT._LIST") _ = c(ctx, cmd) return cmd } // FTAggregate - Performs a search query on an index and applies a series of aggregate transformations to the result. // The 'index' parameter specifies the index to search, and the 'query' parameter specifies the search query. // For more information, please refer to the Redis documentation: // [FT.AGGREGATE]: (https://redis.io/commands/ft.aggregate/) func (c cmdable) FTAggregate(ctx context.Context, index string, query string) *MapStringInterfaceCmd { args := []interface{}{"FT.AGGREGATE", index, query} cmd := NewMapStringInterfaceCmd(ctx, args...) 
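// A minimal sketch of the two simplest entry points in this file (unqualified names because
// this is package redis; the index name and query are example values):
var _ = func(ctx context.Context, rdb *Client) {
	indexes, _ := rdb.FT_List(ctx).Result() // names of all existing indexes
	_ = indexes
	raw := rdb.FTAggregate(ctx, "idx:docs", "*") // loosely typed reply; the typed variant, FTAggregateWithArgs, is defined further down
	_ = raw.Err()
}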
_ = c(ctx, cmd) return cmd } func FTAggregateQuery(query string, options *FTAggregateOptions) AggregateQuery { queryArgs := []interface{}{query} if options != nil { if options.Verbatim { queryArgs = append(queryArgs, "VERBATIM") } if options.Scorer != "" { queryArgs = append(queryArgs, "SCORER", options.Scorer) } if options.AddScores { queryArgs = append(queryArgs, "ADDSCORES") } if options.LoadAll && options.Load != nil { panic("FT.AGGREGATE: LOADALL and LOAD are mutually exclusive") } if options.LoadAll { queryArgs = append(queryArgs, "LOAD", "*") } if options.Load != nil { queryArgs = append(queryArgs, "LOAD", len(options.Load)) index, count := len(queryArgs)-1, 0 for _, load := range options.Load { queryArgs = append(queryArgs, load.Field) count++ if load.As != "" { queryArgs = append(queryArgs, "AS", load.As) count += 2 } } queryArgs[index] = count } if options.Timeout > 0 { queryArgs = append(queryArgs, "TIMEOUT", options.Timeout) } for _, apply := range options.Apply { queryArgs = append(queryArgs, "APPLY", apply.Field) if apply.As != "" { queryArgs = append(queryArgs, "AS", apply.As) } } if options.GroupBy != nil { for _, groupBy := range options.GroupBy { queryArgs = append(queryArgs, "GROUPBY", len(groupBy.Fields)) queryArgs = append(queryArgs, groupBy.Fields...) for _, reducer := range groupBy.Reduce { queryArgs = append(queryArgs, "REDUCE") queryArgs = append(queryArgs, reducer.Reducer.String()) if reducer.Args != nil { queryArgs = append(queryArgs, len(reducer.Args)) queryArgs = append(queryArgs, reducer.Args...) } else { queryArgs = append(queryArgs, 0) } if reducer.As != "" { queryArgs = append(queryArgs, "AS", reducer.As) } } } } if options.SortBy != nil { queryArgs = append(queryArgs, "SORTBY") sortByOptions := []interface{}{} for _, sortBy := range options.SortBy { sortByOptions = append(sortByOptions, sortBy.FieldName) if sortBy.Asc && sortBy.Desc { panic("FT.AGGREGATE: ASC and DESC are mutually exclusive") } if sortBy.Asc { sortByOptions = append(sortByOptions, "ASC") } if sortBy.Desc { sortByOptions = append(sortByOptions, "DESC") } } queryArgs = append(queryArgs, len(sortByOptions)) queryArgs = append(queryArgs, sortByOptions...) 
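// A minimal sketch of FTAggregateQuery, which only builds the argument list and does not
// issue a command (the query text and option values are example values):
var _ = func() {
	args := FTAggregateQuery("@price:[0 100]", &FTAggregateOptions{
		Verbatim:    true,
		SortBy:      []FTAggregateSortBy{{FieldName: "@price", Desc: true}},
		LimitOffset: 0,
		Limit:       10,
	})
	// args starts with the query text, followed by VERBATIM, SORTBY, LIMIT, and DIALECT tokens.
	_ = args
}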
} if options.SortByMax > 0 { queryArgs = append(queryArgs, "MAX", options.SortByMax) } if options.LimitOffset >= 0 && options.Limit > 0 { queryArgs = append(queryArgs, "LIMIT", options.LimitOffset, options.Limit) } if options.Filter != "" { queryArgs = append(queryArgs, "FILTER", options.Filter) } if options.WithCursor { queryArgs = append(queryArgs, "WITHCURSOR") if options.WithCursorOptions != nil { if options.WithCursorOptions.Count > 0 { queryArgs = append(queryArgs, "COUNT", options.WithCursorOptions.Count) } if options.WithCursorOptions.MaxIdle > 0 { queryArgs = append(queryArgs, "MAXIDLE", options.WithCursorOptions.MaxIdle) } } } if options.Params != nil { queryArgs = append(queryArgs, "PARAMS", len(options.Params)*2) for key, value := range options.Params { queryArgs = append(queryArgs, key, value) } } if options.DialectVersion > 0 { queryArgs = append(queryArgs, "DIALECT", options.DialectVersion) } else { queryArgs = append(queryArgs, "DIALECT", 2) } } return queryArgs } func ProcessAggregateResult(data []interface{}) (*FTAggregateResult, error) { if len(data) == 0 { return nil, fmt.Errorf("no data returned") } total, ok := data[0].(int64) if !ok { return nil, fmt.Errorf("invalid total format") } rows := make([]AggregateRow, 0, len(data)-1) for _, row := range data[1:] { fields, ok := row.([]interface{}) if !ok { return nil, fmt.Errorf("invalid row format") } rowMap := make(map[string]interface{}) for i := 0; i < len(fields); i += 2 { key, ok := fields[i].(string) if !ok { return nil, fmt.Errorf("invalid field key format") } value := fields[i+1] rowMap[key] = value } rows = append(rows, AggregateRow{Fields: rowMap}) } result := &FTAggregateResult{ Total: int(total), Rows: rows, } return result, nil } func NewAggregateCmd(ctx context.Context, args ...interface{}) *AggregateCmd { return &AggregateCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *AggregateCmd) SetVal(val *FTAggregateResult) { cmd.val = val } func (cmd *AggregateCmd) Val() *FTAggregateResult { return cmd.val } func (cmd *AggregateCmd) Result() (*FTAggregateResult, error) { return cmd.val, cmd.err } func (cmd *AggregateCmd) RawVal() interface{} { return cmd.rawVal } func (cmd *AggregateCmd) RawResult() (interface{}, error) { return cmd.rawVal, cmd.err } func (cmd *AggregateCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *AggregateCmd) readReply(rd *proto.Reader) (err error) { data, err := rd.ReadSlice() if err != nil { return err } cmd.val, err = ProcessAggregateResult(data) if err != nil { return err } return nil } // FTAggregateWithArgs - Performs a search query on an index and applies a series of aggregate transformations to the result. // The 'index' parameter specifies the index to search, and the 'query' parameter specifies the search query. // This function also allows for specifying additional options such as: Verbatim, LoadAll, Load, Timeout, GroupBy, SortBy, SortByMax, Apply, LimitOffset, Limit, Filter, WithCursor, Params, and DialectVersion. 
// For more information, please refer to the Redis documentation: // [FT.AGGREGATE]: (https://redis.io/commands/ft.aggregate/) func (c cmdable) FTAggregateWithArgs(ctx context.Context, index string, query string, options *FTAggregateOptions) *AggregateCmd { args := []interface{}{"FT.AGGREGATE", index, query} if options != nil { if options.Verbatim { args = append(args, "VERBATIM") } if options.Scorer != "" { args = append(args, "SCORER", options.Scorer) } if options.AddScores { args = append(args, "ADDSCORES") } if options.LoadAll && options.Load != nil { panic("FT.AGGREGATE: LOADALL and LOAD are mutually exclusive") } if options.LoadAll { args = append(args, "LOAD", "*") } if options.Load != nil { args = append(args, "LOAD", len(options.Load)) index, count := len(args)-1, 0 for _, load := range options.Load { args = append(args, load.Field) count++ if load.As != "" { args = append(args, "AS", load.As) count += 2 } } args[index] = count } if options.Timeout > 0 { args = append(args, "TIMEOUT", options.Timeout) } for _, apply := range options.Apply { args = append(args, "APPLY", apply.Field) if apply.As != "" { args = append(args, "AS", apply.As) } } if options.GroupBy != nil { for _, groupBy := range options.GroupBy { args = append(args, "GROUPBY", len(groupBy.Fields)) args = append(args, groupBy.Fields...) for _, reducer := range groupBy.Reduce { args = append(args, "REDUCE") args = append(args, reducer.Reducer.String()) if reducer.Args != nil { args = append(args, len(reducer.Args)) args = append(args, reducer.Args...) } else { args = append(args, 0) } if reducer.As != "" { args = append(args, "AS", reducer.As) } } } } if options.SortBy != nil { args = append(args, "SORTBY") sortByOptions := []interface{}{} for _, sortBy := range options.SortBy { sortByOptions = append(sortByOptions, sortBy.FieldName) if sortBy.Asc && sortBy.Desc { panic("FT.AGGREGATE: ASC and DESC are mutually exclusive") } if sortBy.Asc { sortByOptions = append(sortByOptions, "ASC") } if sortBy.Desc { sortByOptions = append(sortByOptions, "DESC") } } args = append(args, len(sortByOptions)) args = append(args, sortByOptions...) } if options.SortByMax > 0 { args = append(args, "MAX", options.SortByMax) } if options.LimitOffset >= 0 && options.Limit > 0 { args = append(args, "LIMIT", options.LimitOffset, options.Limit) } if options.Filter != "" { args = append(args, "FILTER", options.Filter) } if options.WithCursor { args = append(args, "WITHCURSOR") if options.WithCursorOptions != nil { if options.WithCursorOptions.Count > 0 { args = append(args, "COUNT", options.WithCursorOptions.Count) } if options.WithCursorOptions.MaxIdle > 0 { args = append(args, "MAXIDLE", options.WithCursorOptions.MaxIdle) } } } if options.Params != nil { args = append(args, "PARAMS", len(options.Params)*2) for key, value := range options.Params { args = append(args, key, value) } } if options.DialectVersion > 0 { args = append(args, "DIALECT", options.DialectVersion) } else { args = append(args, "DIALECT", 2) } } cmd := NewAggregateCmd(ctx, args...) _ = c(ctx, cmd) return cmd } // FTAliasAdd - Adds an alias to an index. // The 'index' parameter specifies the index to which the alias is added, and the 'alias' parameter specifies the alias. // For more information, please refer to the Redis documentation: // [FT.ALIASADD]: (https://redis.io/commands/ft.aliasadd/) func (c cmdable) FTAliasAdd(ctx context.Context, index string, alias string) *StatusCmd { args := []interface{}{"FT.ALIASADD", alias, index} cmd := NewStatusCmd(ctx, args...) 
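// A minimal sketch of a typed aggregation with FTAggregateWithArgs, grouping on one field
// and counting per group (index, field, and alias names are example values):
var _ = func(ctx context.Context, rdb *Client) {
	res, err := rdb.FTAggregateWithArgs(ctx, "idx:docs", "*", &FTAggregateOptions{
		GroupBy: []FTAggregateGroupBy{{
			Fields: []interface{}{"@brand"},
			Reduce: []FTAggregateReducer{{Reducer: SearchCount, As: "count"}},
		}},
		SortBy: []FTAggregateSortBy{{FieldName: "@count", Desc: true}},
	}).Result()
	if err != nil {
		return
	}
	_ = res.Total // number of groups reported by the server
	for _, row := range res.Rows {
		_ = row.Fields // field name -> value for this group, e.g. "brand" and "count"
	}
}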
_ = c(ctx, cmd) return cmd } // FTAliasDel - Removes an alias from an index. // The 'alias' parameter specifies the alias to be removed. // For more information, please refer to the Redis documentation: // [FT.ALIASDEL]: (https://redis.io/commands/ft.aliasdel/) func (c cmdable) FTAliasDel(ctx context.Context, alias string) *StatusCmd { cmd := NewStatusCmd(ctx, "FT.ALIASDEL", alias) _ = c(ctx, cmd) return cmd } // FTAliasUpdate - Updates an alias to an index. // The 'index' parameter specifies the index to which the alias is updated, and the 'alias' parameter specifies the alias. // If the alias already exists for a different index, it updates the alias to point to the specified index instead. // For more information, please refer to the Redis documentation: // [FT.ALIASUPDATE]: (https://redis.io/commands/ft.aliasupdate/) func (c cmdable) FTAliasUpdate(ctx context.Context, index string, alias string) *StatusCmd { cmd := NewStatusCmd(ctx, "FT.ALIASUPDATE", alias, index) _ = c(ctx, cmd) return cmd } // FTAlter - Alters the definition of an existing index. // The 'index' parameter specifies the index to alter, and the 'skipInitialScan' parameter specifies whether to skip the initial scan. // The 'definition' parameter specifies the new definition for the index. // For more information, please refer to the Redis documentation: // [FT.ALTER]: (https://redis.io/commands/ft.alter/) func (c cmdable) FTAlter(ctx context.Context, index string, skipInitialScan bool, definition []interface{}) *StatusCmd { args := []interface{}{"FT.ALTER", index} if skipInitialScan { args = append(args, "SKIPINITIALSCAN") } args = append(args, "SCHEMA", "ADD") args = append(args, definition...) cmd := NewStatusCmd(ctx, args...) _ = c(ctx, cmd) return cmd } // Retrieves the value of a RediSearch configuration parameter. // The 'option' parameter specifies the configuration parameter to retrieve. // For more information, please refer to the Redis [FT.CONFIG GET] documentation. // // Deprecated: FTConfigGet is deprecated in Redis 8. // All configuration will be done with the CONFIG GET command. // For more information check [Client.ConfigGet] and [CONFIG GET Documentation] // // [CONFIG GET Documentation]: https://redis.io/commands/config-get/ // [FT.CONFIG GET]: https://redis.io/commands/ft.config-get/ func (c cmdable) FTConfigGet(ctx context.Context, option string) *MapMapStringInterfaceCmd { cmd := NewMapMapStringInterfaceCmd(ctx, "FT.CONFIG", "GET", option) _ = c(ctx, cmd) return cmd } // Sets the value of a RediSearch configuration parameter. // The 'option' parameter specifies the configuration parameter to set, and the 'value' parameter specifies the new value. // For more information, please refer to the Redis [FT.CONFIG SET] documentation. // // Deprecated: FTConfigSet is deprecated in Redis 8. // All configuration will be done with the CONFIG SET command. // For more information check [Client.ConfigSet] and [CONFIG SET Documentation] // // [CONFIG SET Documentation]: https://redis.io/commands/config-set/ // [FT.CONFIG SET]: https://redis.io/commands/ft.config-set/ func (c cmdable) FTConfigSet(ctx context.Context, option string, value interface{}) *StatusCmd { cmd := NewStatusCmd(ctx, "FT.CONFIG", "SET", option, value) _ = c(ctx, cmd) return cmd } // FTCreate - Creates a new index with the given options and schema. // The 'index' parameter specifies the name of the index to create. 
// The 'options' parameter specifies various options for the index, such as: // whether to index hashes or JSONs, prefixes, filters, default language, score, score field, payload field, etc. // The 'schema' parameter specifies the schema for the index, which includes the field name, field type, etc. // For more information, please refer to the Redis documentation: // [FT.CREATE]: (https://redis.io/commands/ft.create/) func (c cmdable) FTCreate(ctx context.Context, index string, options *FTCreateOptions, schema ...*FieldSchema) *StatusCmd { args := []interface{}{"FT.CREATE", index} if options != nil { if options.OnHash && !options.OnJSON { args = append(args, "ON", "HASH") } if options.OnJSON && !options.OnHash { args = append(args, "ON", "JSON") } if options.OnHash && options.OnJSON { panic("FT.CREATE: ON HASH and ON JSON are mutually exclusive") } if options.Prefix != nil { args = append(args, "PREFIX", len(options.Prefix)) args = append(args, options.Prefix...) } if options.Filter != "" { args = append(args, "FILTER", options.Filter) } if options.DefaultLanguage != "" { args = append(args, "LANGUAGE", options.DefaultLanguage) } if options.LanguageField != "" { args = append(args, "LANGUAGE_FIELD", options.LanguageField) } if options.Score > 0 { args = append(args, "SCORE", options.Score) } if options.ScoreField != "" { args = append(args, "SCORE_FIELD", options.ScoreField) } if options.PayloadField != "" { args = append(args, "PAYLOAD_FIELD", options.PayloadField) } if options.MaxTextFields > 0 { args = append(args, "MAXTEXTFIELDS", options.MaxTextFields) } if options.NoOffsets { args = append(args, "NOOFFSETS") } if options.Temporary > 0 { args = append(args, "TEMPORARY", options.Temporary) } if options.NoHL { args = append(args, "NOHL") } if options.NoFields { args = append(args, "NOFIELDS") } if options.NoFreqs { args = append(args, "NOFREQS") } if options.StopWords != nil { args = append(args, "STOPWORDS", len(options.StopWords)) args = append(args, options.StopWords...) 
} if options.SkipInitialScan { args = append(args, "SKIPINITIALSCAN") } } if schema == nil { panic("FT.CREATE: SCHEMA is required") } args = append(args, "SCHEMA") for _, schema := range schema { if schema.FieldName == "" || schema.FieldType == SearchFieldTypeInvalid { panic("FT.CREATE: SCHEMA FieldName and FieldType are required") } args = append(args, schema.FieldName) if schema.As != "" { args = append(args, "AS", schema.As) } args = append(args, schema.FieldType.String()) if schema.VectorArgs != nil { if schema.FieldType != SearchFieldTypeVector { panic("FT.CREATE: SCHEMA FieldType VECTOR is required for VectorArgs") } if schema.VectorArgs.FlatOptions != nil && schema.VectorArgs.HNSWOptions != nil { panic("FT.CREATE: SCHEMA VectorArgs FlatOptions and HNSWOptions are mutually exclusive") } if schema.VectorArgs.FlatOptions != nil { args = append(args, "FLAT") if schema.VectorArgs.FlatOptions.Type == "" || schema.VectorArgs.FlatOptions.Dim == 0 || schema.VectorArgs.FlatOptions.DistanceMetric == "" { panic("FT.CREATE: Type, Dim and DistanceMetric are required for VECTOR FLAT") } flatArgs := []interface{}{ "TYPE", schema.VectorArgs.FlatOptions.Type, "DIM", schema.VectorArgs.FlatOptions.Dim, "DISTANCE_METRIC", schema.VectorArgs.FlatOptions.DistanceMetric, } if schema.VectorArgs.FlatOptions.InitialCapacity > 0 { flatArgs = append(flatArgs, "INITIAL_CAP", schema.VectorArgs.FlatOptions.InitialCapacity) } if schema.VectorArgs.FlatOptions.BlockSize > 0 { flatArgs = append(flatArgs, "BLOCK_SIZE", schema.VectorArgs.FlatOptions.BlockSize) } args = append(args, len(flatArgs)) args = append(args, flatArgs...) } if schema.VectorArgs.HNSWOptions != nil { args = append(args, "HNSW") if schema.VectorArgs.HNSWOptions.Type == "" || schema.VectorArgs.HNSWOptions.Dim == 0 || schema.VectorArgs.HNSWOptions.DistanceMetric == "" { panic("FT.CREATE: Type, Dim and DistanceMetric are required for VECTOR HNSW") } hnswArgs := []interface{}{ "TYPE", schema.VectorArgs.HNSWOptions.Type, "DIM", schema.VectorArgs.HNSWOptions.Dim, "DISTANCE_METRIC", schema.VectorArgs.HNSWOptions.DistanceMetric, } if schema.VectorArgs.HNSWOptions.InitialCapacity > 0 { hnswArgs = append(hnswArgs, "INITIAL_CAP", schema.VectorArgs.HNSWOptions.InitialCapacity) } if schema.VectorArgs.HNSWOptions.MaxEdgesPerNode > 0 { hnswArgs = append(hnswArgs, "M", schema.VectorArgs.HNSWOptions.MaxEdgesPerNode) } if schema.VectorArgs.HNSWOptions.MaxAllowedEdgesPerNode > 0 { hnswArgs = append(hnswArgs, "EF_CONSTRUCTION", schema.VectorArgs.HNSWOptions.MaxAllowedEdgesPerNode) } if schema.VectorArgs.HNSWOptions.EFRunTime > 0 { hnswArgs = append(hnswArgs, "EF_RUNTIME", schema.VectorArgs.HNSWOptions.EFRunTime) } if schema.VectorArgs.HNSWOptions.Epsilon > 0 { hnswArgs = append(hnswArgs, "EPSILON", schema.VectorArgs.HNSWOptions.Epsilon) } args = append(args, len(hnswArgs)) args = append(args, hnswArgs...) 
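// A minimal sketch of FTCreate building a JSON index that mixes a text field with an HNSW
// vector field (index name, paths, dimension, and distance metric are example values):
var _ = func(ctx context.Context, rdb *Client) {
	err := rdb.FTCreate(ctx, "idx:docs",
		&FTCreateOptions{OnJSON: true, Prefix: []interface{}{"doc:"}},
		&FieldSchema{FieldName: "$.title", As: "title", FieldType: SearchFieldTypeText},
		&FieldSchema{
			FieldName: "$.embedding",
			As:        "embedding",
			FieldType: SearchFieldTypeVector,
			VectorArgs: &FTVectorArgs{HNSWOptions: &FTHNSWOptions{
				Type:           "FLOAT32",
				Dim:            128,
				DistanceMetric: "COSINE",
			}},
		},
	).Err()
	_ = err
}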
} } if schema.GeoShapeFieldType != "" { if schema.FieldType != SearchFieldTypeGeoShape { panic("FT.CREATE: SCHEMA FieldType GEOSHAPE is required for GeoShapeFieldType") } args = append(args, schema.GeoShapeFieldType) } if schema.NoStem { args = append(args, "NOSTEM") } if schema.Sortable { args = append(args, "SORTABLE") } if schema.UNF { args = append(args, "UNF") } if schema.NoIndex { args = append(args, "NOINDEX") } if schema.PhoneticMatcher != "" { args = append(args, "PHONETIC", schema.PhoneticMatcher) } if schema.Weight > 0 { args = append(args, "WEIGHT", schema.Weight) } if schema.Separator != "" { args = append(args, "SEPARATOR", schema.Separator) } if schema.CaseSensitive { args = append(args, "CASESENSITIVE") } if schema.WithSuffixtrie { args = append(args, "WITHSUFFIXTRIE") } if schema.IndexEmpty { args = append(args, "INDEXEMPTY") } if schema.IndexMissing { args = append(args, "INDEXMISSING") } } cmd := NewStatusCmd(ctx, args...) _ = c(ctx, cmd) return cmd } // FTCursorDel - Deletes a cursor from an existing index. // The 'index' parameter specifies the index from which to delete the cursor, and the 'cursorId' parameter specifies the ID of the cursor to delete. // For more information, please refer to the Redis documentation: // [FT.CURSOR DEL]: (https://redis.io/commands/ft.cursor-del/) func (c cmdable) FTCursorDel(ctx context.Context, index string, cursorId int) *StatusCmd { cmd := NewStatusCmd(ctx, "FT.CURSOR", "DEL", index, cursorId) _ = c(ctx, cmd) return cmd } // FTCursorRead - Reads the next results from an existing cursor. // The 'index' parameter specifies the index from which to read the cursor, the 'cursorId' parameter specifies the ID of the cursor to read, and the 'count' parameter specifies the number of results to read. // For more information, please refer to the Redis documentation: // [FT.CURSOR READ]: (https://redis.io/commands/ft.cursor-read/) func (c cmdable) FTCursorRead(ctx context.Context, index string, cursorId int, count int) *MapStringInterfaceCmd { args := []interface{}{"FT.CURSOR", "READ", index, cursorId} if count > 0 { args = append(args, "COUNT", count) } cmd := NewMapStringInterfaceCmd(ctx, args...) _ = c(ctx, cmd) return cmd } // FTDictAdd - Adds terms to a dictionary. // The 'dict' parameter specifies the dictionary to which to add the terms, and the 'term' parameter specifies the terms to add. // For more information, please refer to the Redis documentation: // [FT.DICTADD]: (https://redis.io/commands/ft.dictadd/) func (c cmdable) FTDictAdd(ctx context.Context, dict string, term ...interface{}) *IntCmd { args := []interface{}{"FT.DICTADD", dict} args = append(args, term...) cmd := NewIntCmd(ctx, args...) _ = c(ctx, cmd) return cmd } // FTDictDel - Deletes terms from a dictionary. // The 'dict' parameter specifies the dictionary from which to delete the terms, and the 'term' parameter specifies the terms to delete. // For more information, please refer to the Redis documentation: // [FT.DICTDEL]: (https://redis.io/commands/ft.dictdel/) func (c cmdable) FTDictDel(ctx context.Context, dict string, term ...interface{}) *IntCmd { args := []interface{}{"FT.DICTDEL", dict} args = append(args, term...) cmd := NewIntCmd(ctx, args...) _ = c(ctx, cmd) return cmd } // FTDictDump - Returns all terms in the specified dictionary. // The 'dict' parameter specifies the dictionary from which to return the terms. 
// For more information, please refer to the Redis documentation: // [FT.DICTDUMP]: (https://redis.io/commands/ft.dictdump/) func (c cmdable) FTDictDump(ctx context.Context, dict string) *StringSliceCmd { cmd := NewStringSliceCmd(ctx, "FT.DICTDUMP", dict) _ = c(ctx, cmd) return cmd } // FTDropIndex - Deletes an index. // The 'index' parameter specifies the index to delete. // For more information, please refer to the Redis documentation: // [FT.DROPINDEX]: (https://redis.io/commands/ft.dropindex/) func (c cmdable) FTDropIndex(ctx context.Context, index string) *StatusCmd { args := []interface{}{"FT.DROPINDEX", index} cmd := NewStatusCmd(ctx, args...) _ = c(ctx, cmd) return cmd } // FTDropIndexWithArgs - Deletes an index with options. // The 'index' parameter specifies the index to delete, and the 'options' parameter specifies the DeleteDocs option for docs deletion. // For more information, please refer to the Redis documentation: // [FT.DROPINDEX]: (https://redis.io/commands/ft.dropindex/) func (c cmdable) FTDropIndexWithArgs(ctx context.Context, index string, options *FTDropIndexOptions) *StatusCmd { args := []interface{}{"FT.DROPINDEX", index} if options != nil { if options.DeleteDocs { args = append(args, "DD") } } cmd := NewStatusCmd(ctx, args...) _ = c(ctx, cmd) return cmd } // FTExplain - Returns the execution plan for a complex query. // The 'index' parameter specifies the index to query, and the 'query' parameter specifies the query string. // For more information, please refer to the Redis documentation: // [FT.EXPLAIN]: (https://redis.io/commands/ft.explain/) func (c cmdable) FTExplain(ctx context.Context, index string, query string) *StringCmd { cmd := NewStringCmd(ctx, "FT.EXPLAIN", index, query) _ = c(ctx, cmd) return cmd } // FTExplainWithArgs - Returns the execution plan for a complex query with options. // The 'index' parameter specifies the index to query, the 'query' parameter specifies the query string, and the 'options' parameter specifies the Dialect for the query. // For more information, please refer to the Redis documentation: // [FT.EXPLAIN]: (https://redis.io/commands/ft.explain/) func (c cmdable) FTExplainWithArgs(ctx context.Context, index string, query string, options *FTExplainOptions) *StringCmd { args := []interface{}{"FT.EXPLAIN", index, query} if options.Dialect != "" { args = append(args, "DIALECT", options.Dialect) } else { args = append(args, "DIALECT", 2) } cmd := NewStringCmd(ctx, args...) _ = c(ctx, cmd) return cmd } // FTExplainCli - Returns the execution plan for a complex query. 
[Not Implemented] // For more information, see https://redis.io/commands/ft.explaincli/ func (c cmdable) FTExplainCli(ctx context.Context, key, path string) error { panic("not implemented") } func parseFTInfo(data map[string]interface{}) (FTInfoResult, error) { var ftInfo FTInfoResult // Manually parse each field from the map if indexErrors, ok := data["Index Errors"].([]interface{}); ok { ftInfo.IndexErrors = IndexErrors{ IndexingFailures: internal.ToInteger(indexErrors[1]), LastIndexingError: internal.ToString(indexErrors[3]), LastIndexingErrorKey: internal.ToString(indexErrors[5]), } } if attributes, ok := data["attributes"].([]interface{}); ok { for _, attr := range attributes { if attrMap, ok := attr.([]interface{}); ok { att := FTAttribute{} for i := 0; i < len(attrMap); i++ { if internal.ToLower(internal.ToString(attrMap[i])) == "attribute" { att.Attribute = internal.ToString(attrMap[i+1]) continue } if internal.ToLower(internal.ToString(attrMap[i])) == "identifier" { att.Identifier = internal.ToString(attrMap[i+1]) continue } if internal.ToLower(internal.ToString(attrMap[i])) == "type" { att.Type = internal.ToString(attrMap[i+1]) continue } if internal.ToLower(internal.ToString(attrMap[i])) == "weight" { att.Weight = internal.ToFloat(attrMap[i+1]) continue } if internal.ToLower(internal.ToString(attrMap[i])) == "nostem" { att.NoStem = true continue } if internal.ToLower(internal.ToString(attrMap[i])) == "sortable" { att.Sortable = true continue } if internal.ToLower(internal.ToString(attrMap[i])) == "noindex" { att.NoIndex = true continue } if internal.ToLower(internal.ToString(attrMap[i])) == "unf" { att.UNF = true continue } if internal.ToLower(internal.ToString(attrMap[i])) == "phonetic" { att.PhoneticMatcher = internal.ToString(attrMap[i+1]) continue } if internal.ToLower(internal.ToString(attrMap[i])) == "case_sensitive" { att.CaseSensitive = true continue } if internal.ToLower(internal.ToString(attrMap[i])) == "withsuffixtrie" { att.WithSuffixtrie = true continue } } ftInfo.Attributes = append(ftInfo.Attributes, att) } } } ftInfo.BytesPerRecordAvg = internal.ToString(data["bytes_per_record_avg"]) ftInfo.Cleaning = internal.ToInteger(data["cleaning"]) if cursorStats, ok := data["cursor_stats"].([]interface{}); ok { ftInfo.CursorStats = CursorStats{ GlobalIdle: internal.ToInteger(cursorStats[1]), GlobalTotal: internal.ToInteger(cursorStats[3]), IndexCapacity: internal.ToInteger(cursorStats[5]), IndexTotal: internal.ToInteger(cursorStats[7]), } } if dialectStats, ok := data["dialect_stats"].([]interface{}); ok { ftInfo.DialectStats = make(map[string]int) for i := 0; i < len(dialectStats); i += 2 { ftInfo.DialectStats[internal.ToString(dialectStats[i])] = internal.ToInteger(dialectStats[i+1]) } } ftInfo.DocTableSizeMB = internal.ToFloat(data["doc_table_size_mb"]) if fieldStats, ok := data["field statistics"].([]interface{}); ok { for _, stat := range fieldStats { if statMap, ok := stat.([]interface{}); ok { ftInfo.FieldStatistics = append(ftInfo.FieldStatistics, FieldStatistic{ Identifier: internal.ToString(statMap[1]), Attribute: internal.ToString(statMap[3]), IndexErrors: IndexErrors{ IndexingFailures: internal.ToInteger(statMap[5].([]interface{})[1]), LastIndexingError: internal.ToString(statMap[5].([]interface{})[3]), LastIndexingErrorKey: internal.ToString(statMap[5].([]interface{})[5]), }, }) } } } if gcStats, ok := data["gc_stats"].([]interface{}); ok { ftInfo.GCStats = GCStats{} for i := 0; i < len(gcStats); i += 2 { if internal.ToLower(internal.ToString(gcStats[i])) == 
"bytes_collected" { ftInfo.GCStats.BytesCollected = internal.ToInteger(gcStats[i+1]) continue } if internal.ToLower(internal.ToString(gcStats[i])) == "total_ms_run" { ftInfo.GCStats.TotalMsRun = internal.ToInteger(gcStats[i+1]) continue } if internal.ToLower(internal.ToString(gcStats[i])) == "total_cycles" { ftInfo.GCStats.TotalCycles = internal.ToInteger(gcStats[i+1]) continue } if internal.ToLower(internal.ToString(gcStats[i])) == "average_cycle_time_ms" { ftInfo.GCStats.AverageCycleTimeMs = internal.ToString(gcStats[i+1]) continue } if internal.ToLower(internal.ToString(gcStats[i])) == "last_run_time_ms" { ftInfo.GCStats.LastRunTimeMs = internal.ToInteger(gcStats[i+1]) continue } if internal.ToLower(internal.ToString(gcStats[i])) == "gc_numeric_trees_missed" { ftInfo.GCStats.GCNumericTreesMissed = internal.ToInteger(gcStats[i+1]) continue } if internal.ToLower(internal.ToString(gcStats[i])) == "gc_blocks_denied" { ftInfo.GCStats.GCBlocksDenied = internal.ToInteger(gcStats[i+1]) continue } } } ftInfo.GeoshapesSzMB = internal.ToFloat(data["geoshapes_sz_mb"]) ftInfo.HashIndexingFailures = internal.ToInteger(data["hash_indexing_failures"]) if indexDef, ok := data["index_definition"].([]interface{}); ok { ftInfo.IndexDefinition = IndexDefinition{ KeyType: internal.ToString(indexDef[1]), Prefixes: internal.ToStringSlice(indexDef[3]), DefaultScore: internal.ToFloat(indexDef[5]), } } ftInfo.IndexName = internal.ToString(data["index_name"]) ftInfo.IndexOptions = internal.ToStringSlice(data["index_options"].([]interface{})) ftInfo.Indexing = internal.ToInteger(data["indexing"]) ftInfo.InvertedSzMB = internal.ToFloat(data["inverted_sz_mb"]) ftInfo.KeyTableSizeMB = internal.ToFloat(data["key_table_size_mb"]) ftInfo.MaxDocID = internal.ToInteger(data["max_doc_id"]) ftInfo.NumDocs = internal.ToInteger(data["num_docs"]) ftInfo.NumRecords = internal.ToInteger(data["num_records"]) ftInfo.NumTerms = internal.ToInteger(data["num_terms"]) ftInfo.NumberOfUses = internal.ToInteger(data["number_of_uses"]) ftInfo.OffsetBitsPerRecordAvg = internal.ToString(data["offset_bits_per_record_avg"]) ftInfo.OffsetVectorsSzMB = internal.ToFloat(data["offset_vectors_sz_mb"]) ftInfo.OffsetsPerTermAvg = internal.ToString(data["offsets_per_term_avg"]) ftInfo.PercentIndexed = internal.ToFloat(data["percent_indexed"]) ftInfo.RecordsPerDocAvg = internal.ToString(data["records_per_doc_avg"]) ftInfo.SortableValuesSizeMB = internal.ToFloat(data["sortable_values_size_mb"]) ftInfo.TagOverheadSzMB = internal.ToFloat(data["tag_overhead_sz_mb"]) ftInfo.TextOverheadSzMB = internal.ToFloat(data["text_overhead_sz_mb"]) ftInfo.TotalIndexMemorySzMB = internal.ToFloat(data["total_index_memory_sz_mb"]) ftInfo.TotalIndexingTime = internal.ToInteger(data["total_indexing_time"]) ftInfo.TotalInvertedIndexBlocks = internal.ToInteger(data["total_inverted_index_blocks"]) ftInfo.VectorIndexSzMB = internal.ToFloat(data["vector_index_sz_mb"]) return ftInfo, nil } type FTInfoCmd struct { baseCmd val FTInfoResult } func newFTInfoCmd(ctx context.Context, args ...interface{}) *FTInfoCmd { return &FTInfoCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *FTInfoCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *FTInfoCmd) SetVal(val FTInfoResult) { cmd.val = val } func (cmd *FTInfoCmd) Result() (FTInfoResult, error) { return cmd.val, cmd.err } func (cmd *FTInfoCmd) Val() FTInfoResult { return cmd.val } func (cmd *FTInfoCmd) RawVal() interface{} { return cmd.rawVal } func (cmd *FTInfoCmd) RawResult() (interface{}, error) { return 
cmd.rawVal, cmd.err } func (cmd *FTInfoCmd) readReply(rd *proto.Reader) (err error) { n, err := rd.ReadMapLen() if err != nil { return err } data := make(map[string]interface{}, n) for i := 0; i < n; i++ { k, err := rd.ReadString() if err != nil { return err } v, err := rd.ReadReply() if err != nil { if err == Nil { data[k] = Nil continue } if err, ok := err.(proto.RedisError); ok { data[k] = err continue } return err } data[k] = v } cmd.val, err = parseFTInfo(data) if err != nil { return err } return nil } // FTInfo - Retrieves information about an index. // The 'index' parameter specifies the index to retrieve information about. // For more information, please refer to the Redis documentation: // [FT.INFO]: (https://redis.io/commands/ft.info/) func (c cmdable) FTInfo(ctx context.Context, index string) *FTInfoCmd { cmd := newFTInfoCmd(ctx, "FT.INFO", index) _ = c(ctx, cmd) return cmd } // FTSpellCheck - Checks a query string for spelling errors. // For more details about spellcheck query please follow: // https://redis.io/docs/interact/search-and-query/advanced-concepts/spellcheck/ // For more information, please refer to the Redis documentation: // [FT.SPELLCHECK]: (https://redis.io/commands/ft.spellcheck/) func (c cmdable) FTSpellCheck(ctx context.Context, index string, query string) *FTSpellCheckCmd { args := []interface{}{"FT.SPELLCHECK", index, query} cmd := newFTSpellCheckCmd(ctx, args...) _ = c(ctx, cmd) return cmd } // FTSpellCheckWithArgs - Checks a query string for spelling errors with additional options. // For more details about spellcheck query please follow: // https://redis.io/docs/interact/search-and-query/advanced-concepts/spellcheck/ // For more information, please refer to the Redis documentation: // [FT.SPELLCHECK]: (https://redis.io/commands/ft.spellcheck/) func (c cmdable) FTSpellCheckWithArgs(ctx context.Context, index string, query string, options *FTSpellCheckOptions) *FTSpellCheckCmd { args := []interface{}{"FT.SPELLCHECK", index, query} if options != nil { if options.Distance > 0 { args = append(args, "DISTANCE", options.Distance) } if options.Terms != nil { args = append(args, "TERMS", options.Terms.Inclusion, options.Terms.Dictionary) args = append(args, options.Terms.Terms...) } if options.Dialect > 0 { args = append(args, "DIALECT", options.Dialect) } else { args = append(args, "DIALECT", 2) } } cmd := newFTSpellCheckCmd(ctx, args...) 
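// At this point args holds the full command. For example (assumed index, query
// and options), FTSpellCheckWithArgs(ctx, "idx", "hockye",
// &FTSpellCheckOptions{Distance: 2}) assembles:
//
//	FT.SPELLCHECK idx hockye DISTANCE 2 DIALECT 2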
_ = c(ctx, cmd) return cmd } type FTSpellCheckCmd struct { baseCmd val []SpellCheckResult } func newFTSpellCheckCmd(ctx context.Context, args ...interface{}) *FTSpellCheckCmd { return &FTSpellCheckCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *FTSpellCheckCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *FTSpellCheckCmd) SetVal(val []SpellCheckResult) { cmd.val = val } func (cmd *FTSpellCheckCmd) Result() ([]SpellCheckResult, error) { return cmd.val, cmd.err } func (cmd *FTSpellCheckCmd) Val() []SpellCheckResult { return cmd.val } func (cmd *FTSpellCheckCmd) RawVal() interface{} { return cmd.rawVal } func (cmd *FTSpellCheckCmd) RawResult() (interface{}, error) { return cmd.rawVal, cmd.err } func (cmd *FTSpellCheckCmd) readReply(rd *proto.Reader) (err error) { data, err := rd.ReadSlice() if err != nil { return err } cmd.val, err = parseFTSpellCheck(data) if err != nil { return err } return nil } func parseFTSpellCheck(data []interface{}) ([]SpellCheckResult, error) { results := make([]SpellCheckResult, 0, len(data)) for _, termData := range data { termInfo, ok := termData.([]interface{}) if !ok || len(termInfo) != 3 { return nil, fmt.Errorf("invalid term format") } term, ok := termInfo[1].(string) if !ok { return nil, fmt.Errorf("invalid term format") } suggestionsData, ok := termInfo[2].([]interface{}) if !ok { return nil, fmt.Errorf("invalid suggestions format") } suggestions := make([]SpellCheckSuggestion, 0, len(suggestionsData)) for _, suggestionData := range suggestionsData { suggestionInfo, ok := suggestionData.([]interface{}) if !ok || len(suggestionInfo) != 2 { return nil, fmt.Errorf("invalid suggestion format") } scoreStr, ok := suggestionInfo[0].(string) if !ok { return nil, fmt.Errorf("invalid suggestion score format") } score, err := strconv.ParseFloat(scoreStr, 64) if err != nil { return nil, fmt.Errorf("invalid suggestion score value") } suggestion, ok := suggestionInfo[1].(string) if !ok { return nil, fmt.Errorf("invalid suggestion format") } suggestions = append(suggestions, SpellCheckSuggestion{ Score: score, Suggestion: suggestion, }) } results = append(results, SpellCheckResult{ Term: term, Suggestions: suggestions, }) } return results, nil } func parseFTSearch(data []interface{}, noContent, withScores, withPayloads, withSortKeys bool) (FTSearchResult, error) { if len(data) < 1 { return FTSearchResult{}, fmt.Errorf("unexpected search result format") } total, ok := data[0].(int64) if !ok { return FTSearchResult{}, fmt.Errorf("invalid total results format") } var results []Document for i := 1; i < len(data); { docID, ok := data[i].(string) if !ok { return FTSearchResult{}, fmt.Errorf("invalid document ID format") } doc := Document{ ID: docID, Fields: make(map[string]string), } i++ if noContent { results = append(results, doc) continue } if withScores && i < len(data) { if scoreStr, ok := data[i].(string); ok { score, err := strconv.ParseFloat(scoreStr, 64) if err != nil { return FTSearchResult{}, fmt.Errorf("invalid score format") } doc.Score = &score i++ } } if withPayloads && i < len(data) { if payload, ok := data[i].(string); ok { doc.Payload = &payload i++ } } if withSortKeys && i < len(data) { if sortKey, ok := data[i].(string); ok { doc.SortKey = &sortKey i++ } } if i < len(data) { fields, ok := data[i].([]interface{}) if !ok { return FTSearchResult{}, fmt.Errorf("invalid document fields format") } for j := 0; j < len(fields); j += 2 { key, ok := fields[j].(string) if !ok { return FTSearchResult{}, fmt.Errorf("invalid field key 
format") } value, ok := fields[j+1].(string) if !ok { return FTSearchResult{}, fmt.Errorf("invalid field value format") } doc.Fields[key] = value } i++ } results = append(results, doc) } return FTSearchResult{ Total: int(total), Docs: results, }, nil } type FTSearchCmd struct { baseCmd val FTSearchResult options *FTSearchOptions } func newFTSearchCmd(ctx context.Context, options *FTSearchOptions, args ...interface{}) *FTSearchCmd { return &FTSearchCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, options: options, } } func (cmd *FTSearchCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *FTSearchCmd) SetVal(val FTSearchResult) { cmd.val = val } func (cmd *FTSearchCmd) Result() (FTSearchResult, error) { return cmd.val, cmd.err } func (cmd *FTSearchCmd) Val() FTSearchResult { return cmd.val } func (cmd *FTSearchCmd) RawVal() interface{} { return cmd.rawVal } func (cmd *FTSearchCmd) RawResult() (interface{}, error) { return cmd.rawVal, cmd.err } func (cmd *FTSearchCmd) readReply(rd *proto.Reader) (err error) { data, err := rd.ReadSlice() if err != nil { return err } cmd.val, err = parseFTSearch(data, cmd.options.NoContent, cmd.options.WithScores, cmd.options.WithPayloads, cmd.options.WithSortKeys) if err != nil { return err } return nil } // FTSearch - Executes a search query on an index. // The 'index' parameter specifies the index to search, and the 'query' parameter specifies the search query. // For more information, please refer to the Redis documentation about [FT.SEARCH]. // // [FT.SEARCH]: (https://redis.io/commands/ft.search/) func (c cmdable) FTSearch(ctx context.Context, index string, query string) *FTSearchCmd { args := []interface{}{"FT.SEARCH", index, query} cmd := newFTSearchCmd(ctx, &FTSearchOptions{}, args...) _ = c(ctx, cmd) return cmd } type SearchQuery []interface{} // FTSearchQuery - Executes a search query on an index with additional options. // The 'index' parameter specifies the index to search, the 'query' parameter specifies the search query, // and the 'options' parameter specifies additional options for the search. // For more information, please refer to the Redis documentation about [FT.SEARCH]. // // [FT.SEARCH]: (https://redis.io/commands/ft.search/) func FTSearchQuery(query string, options *FTSearchOptions) SearchQuery { queryArgs := []interface{}{query} if options != nil { if options.NoContent { queryArgs = append(queryArgs, "NOCONTENT") } if options.Verbatim { queryArgs = append(queryArgs, "VERBATIM") } if options.NoStopWords { queryArgs = append(queryArgs, "NOSTOPWORDS") } if options.WithScores { queryArgs = append(queryArgs, "WITHSCORES") } if options.WithPayloads { queryArgs = append(queryArgs, "WITHPAYLOADS") } if options.WithSortKeys { queryArgs = append(queryArgs, "WITHSORTKEYS") } if options.Filters != nil { for _, filter := range options.Filters { queryArgs = append(queryArgs, "FILTER", filter.FieldName, filter.Min, filter.Max) } } if options.GeoFilter != nil { for _, geoFilter := range options.GeoFilter { queryArgs = append(queryArgs, "GEOFILTER", geoFilter.FieldName, geoFilter.Longitude, geoFilter.Latitude, geoFilter.Radius, geoFilter.Unit) } } if options.InKeys != nil { queryArgs = append(queryArgs, "INKEYS", len(options.InKeys)) queryArgs = append(queryArgs, options.InKeys...) } if options.InFields != nil { queryArgs = append(queryArgs, "INFIELDS", len(options.InFields)) queryArgs = append(queryArgs, options.InFields...) 
} if options.Return != nil { queryArgs = append(queryArgs, "RETURN") queryArgsReturn := []interface{}{} for _, ret := range options.Return { queryArgsReturn = append(queryArgsReturn, ret.FieldName) if ret.As != "" { queryArgsReturn = append(queryArgsReturn, "AS", ret.As) } } queryArgs = append(queryArgs, len(queryArgsReturn)) queryArgs = append(queryArgs, queryArgsReturn...) } if options.Slop > 0 { queryArgs = append(queryArgs, "SLOP", options.Slop) } if options.Timeout > 0 { queryArgs = append(queryArgs, "TIMEOUT", options.Timeout) } if options.InOrder { queryArgs = append(queryArgs, "INORDER") } if options.Language != "" { queryArgs = append(queryArgs, "LANGUAGE", options.Language) } if options.Expander != "" { queryArgs = append(queryArgs, "EXPANDER", options.Expander) } if options.Scorer != "" { queryArgs = append(queryArgs, "SCORER", options.Scorer) } if options.ExplainScore { queryArgs = append(queryArgs, "EXPLAINSCORE") } if options.Payload != "" { queryArgs = append(queryArgs, "PAYLOAD", options.Payload) } if options.SortBy != nil { queryArgs = append(queryArgs, "SORTBY") for _, sortBy := range options.SortBy { queryArgs = append(queryArgs, sortBy.FieldName) if sortBy.Asc && sortBy.Desc { panic("FT.SEARCH: ASC and DESC are mutually exclusive") } if sortBy.Asc { queryArgs = append(queryArgs, "ASC") } if sortBy.Desc { queryArgs = append(queryArgs, "DESC") } } if options.SortByWithCount { queryArgs = append(queryArgs, "WITHCOUNT") } } if options.LimitOffset >= 0 && options.Limit > 0 { queryArgs = append(queryArgs, "LIMIT", options.LimitOffset, options.Limit) } if options.Params != nil { queryArgs = append(queryArgs, "PARAMS", len(options.Params)*2) for key, value := range options.Params { queryArgs = append(queryArgs, key, value) } } if options.DialectVersion > 0 { queryArgs = append(queryArgs, "DIALECT", options.DialectVersion) } else { queryArgs = append(queryArgs, "DIALECT", 2) } } return queryArgs } // FTSearchWithArgs - Executes a search query on an index with additional options. // The 'index' parameter specifies the index to search, the 'query' parameter specifies the search query, // and the 'options' parameter specifies additional options for the search. // For more information, please refer to the Redis documentation about [FT.SEARCH]. // // [FT.SEARCH]: (https://redis.io/commands/ft.search/) func (c cmdable) FTSearchWithArgs(ctx context.Context, index string, query string, options *FTSearchOptions) *FTSearchCmd { args := []interface{}{"FT.SEARCH", index, query} if options != nil { if options.NoContent { args = append(args, "NOCONTENT") } if options.Verbatim { args = append(args, "VERBATIM") } if options.NoStopWords { args = append(args, "NOSTOPWORDS") } if options.WithScores { args = append(args, "WITHSCORES") } if options.WithPayloads { args = append(args, "WITHPAYLOADS") } if options.WithSortKeys { args = append(args, "WITHSORTKEYS") } if options.Filters != nil { for _, filter := range options.Filters { args = append(args, "FILTER", filter.FieldName, filter.Min, filter.Max) } } if options.GeoFilter != nil { for _, geoFilter := range options.GeoFilter { args = append(args, "GEOFILTER", geoFilter.FieldName, geoFilter.Longitude, geoFilter.Latitude, geoFilter.Radius, geoFilter.Unit) } } if options.InKeys != nil { args = append(args, "INKEYS", len(options.InKeys)) args = append(args, options.InKeys...) } if options.InFields != nil { args = append(args, "INFIELDS", len(options.InFields)) args = append(args, options.InFields...) 
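// Illustrative end result (assumed index, query and option values): a call such as
//
//	rdb.FTSearchWithArgs(ctx, "idx", "hello",
//		&FTSearchOptions{WithScores: true, LimitOffset: 0, Limit: 10})
//
// assembles FT.SEARCH idx hello WITHSCORES LIMIT 0 10 DIALECT 2.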
} if options.Return != nil { args = append(args, "RETURN") argsReturn := []interface{}{} for _, ret := range options.Return { argsReturn = append(argsReturn, ret.FieldName) if ret.As != "" { argsReturn = append(argsReturn, "AS", ret.As) } } args = append(args, len(argsReturn)) args = append(args, argsReturn...) } if options.Slop > 0 { args = append(args, "SLOP", options.Slop) } if options.Timeout > 0 { args = append(args, "TIMEOUT", options.Timeout) } if options.InOrder { args = append(args, "INORDER") } if options.Language != "" { args = append(args, "LANGUAGE", options.Language) } if options.Expander != "" { args = append(args, "EXPANDER", options.Expander) } if options.Scorer != "" { args = append(args, "SCORER", options.Scorer) } if options.ExplainScore { args = append(args, "EXPLAINSCORE") } if options.Payload != "" { args = append(args, "PAYLOAD", options.Payload) } if options.SortBy != nil { args = append(args, "SORTBY") for _, sortBy := range options.SortBy { args = append(args, sortBy.FieldName) if sortBy.Asc && sortBy.Desc { panic("FT.SEARCH: ASC and DESC are mutually exclusive") } if sortBy.Asc { args = append(args, "ASC") } if sortBy.Desc { args = append(args, "DESC") } } if options.SortByWithCount { args = append(args, "WITHCOUNT") } } if options.CountOnly { args = append(args, "LIMIT", 0, 0) } else { if options.LimitOffset >= 0 && options.Limit > 0 || options.LimitOffset > 0 && options.Limit == 0 { args = append(args, "LIMIT", options.LimitOffset, options.Limit) } } if options.Params != nil { args = append(args, "PARAMS", len(options.Params)*2) for key, value := range options.Params { args = append(args, key, value) } } if options.DialectVersion > 0 { args = append(args, "DIALECT", options.DialectVersion) } else { args = append(args, "DIALECT", 2) } } cmd := newFTSearchCmd(ctx, options, args...) _ = c(ctx, cmd) return cmd } func NewFTSynDumpCmd(ctx context.Context, args ...interface{}) *FTSynDumpCmd { return &FTSynDumpCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *FTSynDumpCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *FTSynDumpCmd) SetVal(val []FTSynDumpResult) { cmd.val = val } func (cmd *FTSynDumpCmd) Val() []FTSynDumpResult { return cmd.val } func (cmd *FTSynDumpCmd) Result() ([]FTSynDumpResult, error) { return cmd.val, cmd.err } func (cmd *FTSynDumpCmd) RawVal() interface{} { return cmd.rawVal } func (cmd *FTSynDumpCmd) RawResult() (interface{}, error) { return cmd.rawVal, cmd.err } func (cmd *FTSynDumpCmd) readReply(rd *proto.Reader) error { termSynonymPairs, err := rd.ReadSlice() if err != nil { return err } var results []FTSynDumpResult for i := 0; i < len(termSynonymPairs); i += 2 { term, ok := termSynonymPairs[i].(string) if !ok { return fmt.Errorf("invalid term format") } synonyms, ok := termSynonymPairs[i+1].([]interface{}) if !ok { return fmt.Errorf("invalid synonyms format") } synonymList := make([]string, len(synonyms)) for j, syn := range synonyms { synonym, ok := syn.(string) if !ok { return fmt.Errorf("invalid synonym format") } synonymList[j] = synonym } results = append(results, FTSynDumpResult{ Term: term, Synonyms: synonymList, }) } cmd.val = results return nil } // FTSynDump - Dumps the contents of a synonym group. // The 'index' parameter specifies the index to dump. 
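// Example (illustrative; "idx" is an assumed index name):
//
//	dumps, err := rdb.FTSynDump(ctx, "idx").Result()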
// For more information, please refer to the Redis documentation: // [FT.SYNDUMP]: (https://redis.io/commands/ft.syndump/) func (c cmdable) FTSynDump(ctx context.Context, index string) *FTSynDumpCmd { cmd := NewFTSynDumpCmd(ctx, "FT.SYNDUMP", index) _ = c(ctx, cmd) return cmd } // FTSynUpdate - Creates or updates a synonym group with additional terms. // The 'index' parameter specifies the index to update, the 'synGroupId' parameter specifies the synonym group id, and the 'terms' parameter specifies the additional terms. // For more information, please refer to the Redis documentation: // [FT.SYNUPDATE]: (https://redis.io/commands/ft.synupdate/) func (c cmdable) FTSynUpdate(ctx context.Context, index string, synGroupId interface{}, terms []interface{}) *StatusCmd { args := []interface{}{"FT.SYNUPDATE", index, synGroupId} args = append(args, terms...) cmd := NewStatusCmd(ctx, args...) _ = c(ctx, cmd) return cmd } // FTSynUpdateWithArgs - Creates or updates a synonym group with additional terms and options. // The 'index' parameter specifies the index to update, the 'synGroupId' parameter specifies the synonym group id, the 'options' parameter specifies additional options for the update, and the 'terms' parameter specifies the additional terms. // For more information, please refer to the Redis documentation: // [FT.SYNUPDATE]: (https://redis.io/commands/ft.synupdate/) func (c cmdable) FTSynUpdateWithArgs(ctx context.Context, index string, synGroupId interface{}, options *FTSynUpdateOptions, terms []interface{}) *StatusCmd { args := []interface{}{"FT.SYNUPDATE", index, synGroupId} if options.SkipInitialScan { args = append(args, "SKIPINITIALSCAN") } args = append(args, terms...) cmd := NewStatusCmd(ctx, args...) _ = c(ctx, cmd) return cmd } // FTTagVals - Returns all distinct values indexed in a tag field. // The 'index' parameter specifies the index to check, and the 'field' parameter specifies the tag field to retrieve values from. // For more information, please refer to the Redis documentation: // [FT.TAGVALS]: (https://redis.io/commands/ft.tagvals/) func (c cmdable) FTTagVals(ctx context.Context, index string, field string) *StringSliceCmd { cmd := NewStringSliceCmd(ctx, "FT.TAGVALS", index, field) _ = c(ctx, cmd) return cmd } dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.10.0/helper/0000755000000000000000000000000015024302467023174 5ustar rootrootdependencies/pkg/mod/github.com/redis/go-redis/v9@v9.10.0/helper/helper.go0000644000000000000000000000034115024302467025000 0ustar rootrootpackage helper import "github.com/redis/go-redis/v9/internal/util" func ParseFloat(s string) (float64, error) { return util.ParseStringToFloat(s) } func MustParseFloat(s string) float64 { return util.MustParseFloat(s) } dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.10.0/command.go0000644000000000000000000034550015024302467023671 0ustar rootrootpackage redis import ( "bufio" "context" "fmt" "net" "regexp" "strconv" "strings" "sync" "time" "github.com/redis/go-redis/v9/internal" "github.com/redis/go-redis/v9/internal/hscan" "github.com/redis/go-redis/v9/internal/proto" "github.com/redis/go-redis/v9/internal/util" ) type Cmder interface { // command name. // e.g. "set k v ex 10" -> "set", "cluster info" -> "cluster". Name() string // full command name. // e.g. "set k v ex 10" -> "set", "cluster info" -> "cluster info". FullName() string // all args of the command. // e.g. "set k v ex 10" -> "[set k v ex 10]". Args() []interface{} // format request and response string. // e.g. 
"set k v ex 10" -> "set k v ex 10: OK", "get k" -> "get k: v". String() string stringArg(int) string firstKeyPos() int8 SetFirstKeyPos(int8) readTimeout() *time.Duration readReply(rd *proto.Reader) error readRawReply(rd *proto.Reader) error SetErr(error) Err() error } func setCmdsErr(cmds []Cmder, e error) { for _, cmd := range cmds { if cmd.Err() == nil { cmd.SetErr(e) } } } func cmdsFirstErr(cmds []Cmder) error { for _, cmd := range cmds { if err := cmd.Err(); err != nil { return err } } return nil } func writeCmds(wr *proto.Writer, cmds []Cmder) error { for _, cmd := range cmds { if err := writeCmd(wr, cmd); err != nil { return err } } return nil } func writeCmd(wr *proto.Writer, cmd Cmder) error { return wr.WriteArgs(cmd.Args()) } func cmdFirstKeyPos(cmd Cmder) int { if pos := cmd.firstKeyPos(); pos != 0 { return int(pos) } switch cmd.Name() { case "eval", "evalsha", "eval_ro", "evalsha_ro": if cmd.stringArg(2) != "0" { return 3 } return 0 case "publish": return 1 case "memory": // https://github.com/redis/redis/issues/7493 if cmd.stringArg(1) == "usage" { return 2 } } return 1 } func cmdString(cmd Cmder, val interface{}) string { b := make([]byte, 0, 64) for i, arg := range cmd.Args() { if i > 0 { b = append(b, ' ') } b = internal.AppendArg(b, arg) } if err := cmd.Err(); err != nil { b = append(b, ": "...) b = append(b, err.Error()...) } else if val != nil { b = append(b, ": "...) b = internal.AppendArg(b, val) } return util.BytesToString(b) } //------------------------------------------------------------------------------ type baseCmd struct { ctx context.Context args []interface{} err error keyPos int8 rawVal interface{} _readTimeout *time.Duration } var _ Cmder = (*Cmd)(nil) func (cmd *baseCmd) Name() string { if len(cmd.args) == 0 { return "" } // Cmd name must be lower cased. 
return internal.ToLower(cmd.stringArg(0)) } func (cmd *baseCmd) FullName() string { switch name := cmd.Name(); name { case "cluster", "command": if len(cmd.args) == 1 { return name } if s2, ok := cmd.args[1].(string); ok { return name + " " + s2 } return name default: return name } } func (cmd *baseCmd) Args() []interface{} { return cmd.args } func (cmd *baseCmd) stringArg(pos int) string { if pos < 0 || pos >= len(cmd.args) { return "" } arg := cmd.args[pos] switch v := arg.(type) { case string: return v case []byte: return string(v) default: // TODO: consider using appendArg return fmt.Sprint(v) } } func (cmd *baseCmd) firstKeyPos() int8 { return cmd.keyPos } func (cmd *baseCmd) SetFirstKeyPos(keyPos int8) { cmd.keyPos = keyPos } func (cmd *baseCmd) SetErr(e error) { cmd.err = e } func (cmd *baseCmd) Err() error { return cmd.err } func (cmd *baseCmd) readTimeout() *time.Duration { return cmd._readTimeout } func (cmd *baseCmd) setReadTimeout(d time.Duration) { cmd._readTimeout = &d } func (cmd *baseCmd) readRawReply(rd *proto.Reader) (err error) { cmd.rawVal, err = rd.ReadReply() return err } //------------------------------------------------------------------------------ type Cmd struct { baseCmd val interface{} } func NewCmd(ctx context.Context, args ...interface{}) *Cmd { return &Cmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *Cmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *Cmd) SetVal(val interface{}) { cmd.val = val } func (cmd *Cmd) Val() interface{} { return cmd.val } func (cmd *Cmd) Result() (interface{}, error) { return cmd.val, cmd.err } func (cmd *Cmd) Text() (string, error) { if cmd.err != nil { return "", cmd.err } return toString(cmd.val) } func toString(val interface{}) (string, error) { switch val := val.(type) { case string: return val, nil default: err := fmt.Errorf("redis: unexpected type=%T for String", val) return "", err } } func (cmd *Cmd) Int() (int, error) { if cmd.err != nil { return 0, cmd.err } switch val := cmd.val.(type) { case int64: return int(val), nil case string: return strconv.Atoi(val) default: err := fmt.Errorf("redis: unexpected type=%T for Int", val) return 0, err } } func (cmd *Cmd) Int64() (int64, error) { if cmd.err != nil { return 0, cmd.err } return toInt64(cmd.val) } func toInt64(val interface{}) (int64, error) { switch val := val.(type) { case int64: return val, nil case string: return strconv.ParseInt(val, 10, 64) default: err := fmt.Errorf("redis: unexpected type=%T for Int64", val) return 0, err } } func (cmd *Cmd) Uint64() (uint64, error) { if cmd.err != nil { return 0, cmd.err } return toUint64(cmd.val) } func toUint64(val interface{}) (uint64, error) { switch val := val.(type) { case int64: return uint64(val), nil case string: return strconv.ParseUint(val, 10, 64) default: err := fmt.Errorf("redis: unexpected type=%T for Uint64", val) return 0, err } } func (cmd *Cmd) Float32() (float32, error) { if cmd.err != nil { return 0, cmd.err } return toFloat32(cmd.val) } func toFloat32(val interface{}) (float32, error) { switch val := val.(type) { case int64: return float32(val), nil case string: f, err := strconv.ParseFloat(val, 32) if err != nil { return 0, err } return float32(f), nil default: err := fmt.Errorf("redis: unexpected type=%T for Float32", val) return 0, err } } func (cmd *Cmd) Float64() (float64, error) { if cmd.err != nil { return 0, cmd.err } return toFloat64(cmd.val) } func toFloat64(val interface{}) (float64, error) { switch val := val.(type) { case int64: return float64(val), nil case 
string: return strconv.ParseFloat(val, 64) default: err := fmt.Errorf("redis: unexpected type=%T for Float64", val) return 0, err } } func (cmd *Cmd) Bool() (bool, error) { if cmd.err != nil { return false, cmd.err } return toBool(cmd.val) } func toBool(val interface{}) (bool, error) { switch val := val.(type) { case bool: return val, nil case int64: return val != 0, nil case string: return strconv.ParseBool(val) default: err := fmt.Errorf("redis: unexpected type=%T for Bool", val) return false, err } } func (cmd *Cmd) Slice() ([]interface{}, error) { if cmd.err != nil { return nil, cmd.err } switch val := cmd.val.(type) { case []interface{}: return val, nil default: return nil, fmt.Errorf("redis: unexpected type=%T for Slice", val) } } func (cmd *Cmd) StringSlice() ([]string, error) { slice, err := cmd.Slice() if err != nil { return nil, err } ss := make([]string, len(slice)) for i, iface := range slice { val, err := toString(iface) if err != nil { return nil, err } ss[i] = val } return ss, nil } func (cmd *Cmd) Int64Slice() ([]int64, error) { slice, err := cmd.Slice() if err != nil { return nil, err } nums := make([]int64, len(slice)) for i, iface := range slice { val, err := toInt64(iface) if err != nil { return nil, err } nums[i] = val } return nums, nil } func (cmd *Cmd) Uint64Slice() ([]uint64, error) { slice, err := cmd.Slice() if err != nil { return nil, err } nums := make([]uint64, len(slice)) for i, iface := range slice { val, err := toUint64(iface) if err != nil { return nil, err } nums[i] = val } return nums, nil } func (cmd *Cmd) Float32Slice() ([]float32, error) { slice, err := cmd.Slice() if err != nil { return nil, err } floats := make([]float32, len(slice)) for i, iface := range slice { val, err := toFloat32(iface) if err != nil { return nil, err } floats[i] = val } return floats, nil } func (cmd *Cmd) Float64Slice() ([]float64, error) { slice, err := cmd.Slice() if err != nil { return nil, err } floats := make([]float64, len(slice)) for i, iface := range slice { val, err := toFloat64(iface) if err != nil { return nil, err } floats[i] = val } return floats, nil } func (cmd *Cmd) BoolSlice() ([]bool, error) { slice, err := cmd.Slice() if err != nil { return nil, err } bools := make([]bool, len(slice)) for i, iface := range slice { val, err := toBool(iface) if err != nil { return nil, err } bools[i] = val } return bools, nil } func (cmd *Cmd) readReply(rd *proto.Reader) (err error) { cmd.val, err = rd.ReadReply() return err } //------------------------------------------------------------------------------ type SliceCmd struct { baseCmd val []interface{} } var _ Cmder = (*SliceCmd)(nil) func NewSliceCmd(ctx context.Context, args ...interface{}) *SliceCmd { return &SliceCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *SliceCmd) SetVal(val []interface{}) { cmd.val = val } func (cmd *SliceCmd) Val() []interface{} { return cmd.val } func (cmd *SliceCmd) Result() ([]interface{}, error) { return cmd.val, cmd.err } func (cmd *SliceCmd) String() string { return cmdString(cmd, cmd.val) } // Scan scans the results from the map into a destination struct. The map keys // are matched in the Redis struct fields by the `redis:"field"` tag. func (cmd *SliceCmd) Scan(dst interface{}) error { if cmd.err != nil { return cmd.err } // Pass the list of keys and values. // Skip the first two args for: HMGET key var args []interface{} if cmd.args[0] == "hmget" { args = cmd.args[2:] } else { // Otherwise, it's: MGET field field ... 
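// e.g. (illustrative): for an MGET of keys "k1" and "k2", the keys passed to
// hscan.Scan are ["k1", "k2"], matched against struct fields tagged
// `redis:"k1"` / `redis:"k2"`.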
args = cmd.args[1:] } return hscan.Scan(dst, args, cmd.val) } func (cmd *SliceCmd) readReply(rd *proto.Reader) (err error) { cmd.val, err = rd.ReadSlice() return err } //------------------------------------------------------------------------------ type StatusCmd struct { baseCmd val string } var _ Cmder = (*StatusCmd)(nil) func NewStatusCmd(ctx context.Context, args ...interface{}) *StatusCmd { return &StatusCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *StatusCmd) SetVal(val string) { cmd.val = val } func (cmd *StatusCmd) Val() string { return cmd.val } func (cmd *StatusCmd) Result() (string, error) { return cmd.val, cmd.err } func (cmd *StatusCmd) Bytes() ([]byte, error) { return util.StringToBytes(cmd.val), cmd.err } func (cmd *StatusCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *StatusCmd) readReply(rd *proto.Reader) (err error) { cmd.val, err = rd.ReadString() return err } //------------------------------------------------------------------------------ type IntCmd struct { baseCmd val int64 } var _ Cmder = (*IntCmd)(nil) func NewIntCmd(ctx context.Context, args ...interface{}) *IntCmd { return &IntCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *IntCmd) SetVal(val int64) { cmd.val = val } func (cmd *IntCmd) Val() int64 { return cmd.val } func (cmd *IntCmd) Result() (int64, error) { return cmd.val, cmd.err } func (cmd *IntCmd) Uint64() (uint64, error) { return uint64(cmd.val), cmd.err } func (cmd *IntCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *IntCmd) readReply(rd *proto.Reader) (err error) { cmd.val, err = rd.ReadInt() return err } //------------------------------------------------------------------------------ type IntSliceCmd struct { baseCmd val []int64 } var _ Cmder = (*IntSliceCmd)(nil) func NewIntSliceCmd(ctx context.Context, args ...interface{}) *IntSliceCmd { return &IntSliceCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *IntSliceCmd) SetVal(val []int64) { cmd.val = val } func (cmd *IntSliceCmd) Val() []int64 { return cmd.val } func (cmd *IntSliceCmd) Result() ([]int64, error) { return cmd.val, cmd.err } func (cmd *IntSliceCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error { n, err := rd.ReadArrayLen() if err != nil { return err } cmd.val = make([]int64, n) for i := 0; i < len(cmd.val); i++ { if cmd.val[i], err = rd.ReadInt(); err != nil { return err } } return nil } //------------------------------------------------------------------------------ type DurationCmd struct { baseCmd val time.Duration precision time.Duration } var _ Cmder = (*DurationCmd)(nil) func NewDurationCmd(ctx context.Context, precision time.Duration, args ...interface{}) *DurationCmd { return &DurationCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, precision: precision, } } func (cmd *DurationCmd) SetVal(val time.Duration) { cmd.val = val } func (cmd *DurationCmd) Val() time.Duration { return cmd.val } func (cmd *DurationCmd) Result() (time.Duration, error) { return cmd.val, cmd.err } func (cmd *DurationCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *DurationCmd) readReply(rd *proto.Reader) error { n, err := rd.ReadInt() if err != nil { return err } switch n { // -2 if the key does not exist // -1 if the key exists but has no associated expire case -2, -1: cmd.val = time.Duration(n) default: cmd.val = time.Duration(n) * cmd.precision } return nil } //------------------------------------------------------------------------------ type 
TimeCmd struct { baseCmd val time.Time } var _ Cmder = (*TimeCmd)(nil) func NewTimeCmd(ctx context.Context, args ...interface{}) *TimeCmd { return &TimeCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *TimeCmd) SetVal(val time.Time) { cmd.val = val } func (cmd *TimeCmd) Val() time.Time { return cmd.val } func (cmd *TimeCmd) Result() (time.Time, error) { return cmd.val, cmd.err } func (cmd *TimeCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *TimeCmd) readReply(rd *proto.Reader) error { if err := rd.ReadFixedArrayLen(2); err != nil { return err } second, err := rd.ReadInt() if err != nil { return err } microsecond, err := rd.ReadInt() if err != nil { return err } cmd.val = time.Unix(second, microsecond*1000) return nil } //------------------------------------------------------------------------------ type BoolCmd struct { baseCmd val bool } var _ Cmder = (*BoolCmd)(nil) func NewBoolCmd(ctx context.Context, args ...interface{}) *BoolCmd { return &BoolCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *BoolCmd) SetVal(val bool) { cmd.val = val } func (cmd *BoolCmd) Val() bool { return cmd.val } func (cmd *BoolCmd) Result() (bool, error) { return cmd.val, cmd.err } func (cmd *BoolCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *BoolCmd) readReply(rd *proto.Reader) (err error) { cmd.val, err = rd.ReadBool() // `SET key value NX` returns nil when key already exists. But // `SETNX key value` returns bool (0/1). So convert nil to bool. if err == Nil { cmd.val = false err = nil } return err } //------------------------------------------------------------------------------ type StringCmd struct { baseCmd val string } var _ Cmder = (*StringCmd)(nil) func NewStringCmd(ctx context.Context, args ...interface{}) *StringCmd { return &StringCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *StringCmd) SetVal(val string) { cmd.val = val } func (cmd *StringCmd) Val() string { return cmd.val } func (cmd *StringCmd) Result() (string, error) { return cmd.val, cmd.err } func (cmd *StringCmd) Bytes() ([]byte, error) { return util.StringToBytes(cmd.val), cmd.err } func (cmd *StringCmd) Bool() (bool, error) { if cmd.err != nil { return false, cmd.err } return strconv.ParseBool(cmd.val) } func (cmd *StringCmd) Int() (int, error) { if cmd.err != nil { return 0, cmd.err } return strconv.Atoi(cmd.Val()) } func (cmd *StringCmd) Int64() (int64, error) { if cmd.err != nil { return 0, cmd.err } return strconv.ParseInt(cmd.Val(), 10, 64) } func (cmd *StringCmd) Uint64() (uint64, error) { if cmd.err != nil { return 0, cmd.err } return strconv.ParseUint(cmd.Val(), 10, 64) } func (cmd *StringCmd) Float32() (float32, error) { if cmd.err != nil { return 0, cmd.err } f, err := strconv.ParseFloat(cmd.Val(), 32) if err != nil { return 0, err } return float32(f), nil } func (cmd *StringCmd) Float64() (float64, error) { if cmd.err != nil { return 0, cmd.err } return strconv.ParseFloat(cmd.Val(), 64) } func (cmd *StringCmd) Time() (time.Time, error) { if cmd.err != nil { return time.Time{}, cmd.err } return time.Parse(time.RFC3339Nano, cmd.Val()) } func (cmd *StringCmd) Scan(val interface{}) error { if cmd.err != nil { return cmd.err } return proto.Scan([]byte(cmd.val), val) } func (cmd *StringCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *StringCmd) readReply(rd *proto.Reader) (err error) { cmd.val, err = rd.ReadString() return err } //------------------------------------------------------------------------------ type FloatCmd struct { baseCmd 
val float64 } var _ Cmder = (*FloatCmd)(nil) func NewFloatCmd(ctx context.Context, args ...interface{}) *FloatCmd { return &FloatCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *FloatCmd) SetVal(val float64) { cmd.val = val } func (cmd *FloatCmd) Val() float64 { return cmd.val } func (cmd *FloatCmd) Result() (float64, error) { return cmd.val, cmd.err } func (cmd *FloatCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *FloatCmd) readReply(rd *proto.Reader) (err error) { cmd.val, err = rd.ReadFloat() return err } //------------------------------------------------------------------------------ type FloatSliceCmd struct { baseCmd val []float64 } var _ Cmder = (*FloatSliceCmd)(nil) func NewFloatSliceCmd(ctx context.Context, args ...interface{}) *FloatSliceCmd { return &FloatSliceCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *FloatSliceCmd) SetVal(val []float64) { cmd.val = val } func (cmd *FloatSliceCmd) Val() []float64 { return cmd.val } func (cmd *FloatSliceCmd) Result() ([]float64, error) { return cmd.val, cmd.err } func (cmd *FloatSliceCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *FloatSliceCmd) readReply(rd *proto.Reader) error { n, err := rd.ReadArrayLen() if err != nil { return err } cmd.val = make([]float64, n) for i := 0; i < len(cmd.val); i++ { switch num, err := rd.ReadFloat(); { case err == Nil: cmd.val[i] = 0 case err != nil: return err default: cmd.val[i] = num } } return nil } //------------------------------------------------------------------------------ type StringSliceCmd struct { baseCmd val []string } var _ Cmder = (*StringSliceCmd)(nil) func NewStringSliceCmd(ctx context.Context, args ...interface{}) *StringSliceCmd { return &StringSliceCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *StringSliceCmd) SetVal(val []string) { cmd.val = val } func (cmd *StringSliceCmd) Val() []string { return cmd.val } func (cmd *StringSliceCmd) Result() ([]string, error) { return cmd.val, cmd.err } func (cmd *StringSliceCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *StringSliceCmd) ScanSlice(container interface{}) error { return proto.ScanSlice(cmd.Val(), container) } func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error { n, err := rd.ReadArrayLen() if err != nil { return err } cmd.val = make([]string, n) for i := 0; i < len(cmd.val); i++ { switch s, err := rd.ReadString(); { case err == Nil: cmd.val[i] = "" case err != nil: return err default: cmd.val[i] = s } } return nil } //------------------------------------------------------------------------------ type KeyValue struct { Key string Value string } type KeyValueSliceCmd struct { baseCmd val []KeyValue } var _ Cmder = (*KeyValueSliceCmd)(nil) func NewKeyValueSliceCmd(ctx context.Context, args ...interface{}) *KeyValueSliceCmd { return &KeyValueSliceCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *KeyValueSliceCmd) SetVal(val []KeyValue) { cmd.val = val } func (cmd *KeyValueSliceCmd) Val() []KeyValue { return cmd.val } func (cmd *KeyValueSliceCmd) Result() ([]KeyValue, error) { return cmd.val, cmd.err } func (cmd *KeyValueSliceCmd) String() string { return cmdString(cmd, cmd.val) } // Many commands will respond to two formats: // 1. 1) "one" // 2. (double) 1 // 2. 1) "two" // 2. (double) 2 // // OR: // 1. "two" // 2. (double) 2 // 3. "one" // 4. 
(double) 1 func (cmd *KeyValueSliceCmd) readReply(rd *proto.Reader) error { // nolint:dupl n, err := rd.ReadArrayLen() if err != nil { return err } // If the n is 0, can't continue reading. if n == 0 { cmd.val = make([]KeyValue, 0) return nil } typ, err := rd.PeekReplyType() if err != nil { return err } array := typ == proto.RespArray if array { cmd.val = make([]KeyValue, n) } else { cmd.val = make([]KeyValue, n/2) } for i := 0; i < len(cmd.val); i++ { if array { if err = rd.ReadFixedArrayLen(2); err != nil { return err } } if cmd.val[i].Key, err = rd.ReadString(); err != nil { return err } if cmd.val[i].Value, err = rd.ReadString(); err != nil { return err } } return nil } //------------------------------------------------------------------------------ type BoolSliceCmd struct { baseCmd val []bool } var _ Cmder = (*BoolSliceCmd)(nil) func NewBoolSliceCmd(ctx context.Context, args ...interface{}) *BoolSliceCmd { return &BoolSliceCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *BoolSliceCmd) SetVal(val []bool) { cmd.val = val } func (cmd *BoolSliceCmd) Val() []bool { return cmd.val } func (cmd *BoolSliceCmd) Result() ([]bool, error) { return cmd.val, cmd.err } func (cmd *BoolSliceCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error { n, err := rd.ReadArrayLen() if err != nil { return err } cmd.val = make([]bool, n) for i := 0; i < len(cmd.val); i++ { if cmd.val[i], err = rd.ReadBool(); err != nil { return err } } return nil } //------------------------------------------------------------------------------ type MapStringStringCmd struct { baseCmd val map[string]string } var _ Cmder = (*MapStringStringCmd)(nil) func NewMapStringStringCmd(ctx context.Context, args ...interface{}) *MapStringStringCmd { return &MapStringStringCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *MapStringStringCmd) Val() map[string]string { return cmd.val } func (cmd *MapStringStringCmd) SetVal(val map[string]string) { cmd.val = val } func (cmd *MapStringStringCmd) Result() (map[string]string, error) { return cmd.val, cmd.err } func (cmd *MapStringStringCmd) String() string { return cmdString(cmd, cmd.val) } // Scan scans the results from the map into a destination struct. The map keys // are matched in the Redis struct fields by the `redis:"field"` tag. 
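// Example (illustrative, assumed destination struct):
//
//	type Model struct {
//		Name string `redis:"name"`
//		Age  int    `redis:"age"`
//	}
//	var m Model
//	err := rdb.HGetAll(ctx, "key").Scan(&m)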
func (cmd *MapStringStringCmd) Scan(dest interface{}) error { if cmd.err != nil { return cmd.err } strct, err := hscan.Struct(dest) if err != nil { return err } for k, v := range cmd.val { if err := strct.Scan(k, v); err != nil { return err } } return nil } func (cmd *MapStringStringCmd) readReply(rd *proto.Reader) error { n, err := rd.ReadMapLen() if err != nil { return err } cmd.val = make(map[string]string, n) for i := 0; i < n; i++ { key, err := rd.ReadString() if err != nil { return err } value, err := rd.ReadString() if err != nil { return err } cmd.val[key] = value } return nil } //------------------------------------------------------------------------------ type MapStringIntCmd struct { baseCmd val map[string]int64 } var _ Cmder = (*MapStringIntCmd)(nil) func NewMapStringIntCmd(ctx context.Context, args ...interface{}) *MapStringIntCmd { return &MapStringIntCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *MapStringIntCmd) SetVal(val map[string]int64) { cmd.val = val } func (cmd *MapStringIntCmd) Val() map[string]int64 { return cmd.val } func (cmd *MapStringIntCmd) Result() (map[string]int64, error) { return cmd.val, cmd.err } func (cmd *MapStringIntCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *MapStringIntCmd) readReply(rd *proto.Reader) error { n, err := rd.ReadMapLen() if err != nil { return err } cmd.val = make(map[string]int64, n) for i := 0; i < n; i++ { key, err := rd.ReadString() if err != nil { return err } nn, err := rd.ReadInt() if err != nil { return err } cmd.val[key] = nn } return nil } // ------------------------------------------------------------------------------ type MapStringSliceInterfaceCmd struct { baseCmd val map[string][]interface{} } func NewMapStringSliceInterfaceCmd(ctx context.Context, args ...interface{}) *MapStringSliceInterfaceCmd { return &MapStringSliceInterfaceCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *MapStringSliceInterfaceCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *MapStringSliceInterfaceCmd) SetVal(val map[string][]interface{}) { cmd.val = val } func (cmd *MapStringSliceInterfaceCmd) Result() (map[string][]interface{}, error) { return cmd.val, cmd.err } func (cmd *MapStringSliceInterfaceCmd) Val() map[string][]interface{} { return cmd.val } func (cmd *MapStringSliceInterfaceCmd) readReply(rd *proto.Reader) (err error) { readType, err := rd.PeekReplyType() if err != nil { return err } cmd.val = make(map[string][]interface{}) switch readType { case proto.RespMap: n, err := rd.ReadMapLen() if err != nil { return err } for i := 0; i < n; i++ { k, err := rd.ReadString() if err != nil { return err } nn, err := rd.ReadArrayLen() if err != nil { return err } cmd.val[k] = make([]interface{}, nn) for j := 0; j < nn; j++ { value, err := rd.ReadReply() if err != nil { return err } cmd.val[k][j] = value } } case proto.RespArray: // RESP2 response n, err := rd.ReadArrayLen() if err != nil { return err } for i := 0; i < n; i++ { // Each entry in this array is itself an array with key details itemLen, err := rd.ReadArrayLen() if err != nil { return err } key, err := rd.ReadString() if err != nil { return err } cmd.val[key] = make([]interface{}, 0, itemLen-1) for j := 1; j < itemLen; j++ { // Read the inner array for timestamp-value pairs data, err := rd.ReadReply() if err != nil { return err } cmd.val[key] = append(cmd.val[key], data) } } } return nil } //------------------------------------------------------------------------------ type StringStructMapCmd struct { baseCmd val 
map[string]struct{} } var _ Cmder = (*StringStructMapCmd)(nil) func NewStringStructMapCmd(ctx context.Context, args ...interface{}) *StringStructMapCmd { return &StringStructMapCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *StringStructMapCmd) SetVal(val map[string]struct{}) { cmd.val = val } func (cmd *StringStructMapCmd) Val() map[string]struct{} { return cmd.val } func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) { return cmd.val, cmd.err } func (cmd *StringStructMapCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error { n, err := rd.ReadArrayLen() if err != nil { return err } cmd.val = make(map[string]struct{}, n) for i := 0; i < n; i++ { key, err := rd.ReadString() if err != nil { return err } cmd.val[key] = struct{}{} } return nil } //------------------------------------------------------------------------------ type XMessage struct { ID string Values map[string]interface{} } type XMessageSliceCmd struct { baseCmd val []XMessage } var _ Cmder = (*XMessageSliceCmd)(nil) func NewXMessageSliceCmd(ctx context.Context, args ...interface{}) *XMessageSliceCmd { return &XMessageSliceCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *XMessageSliceCmd) SetVal(val []XMessage) { cmd.val = val } func (cmd *XMessageSliceCmd) Val() []XMessage { return cmd.val } func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) { return cmd.val, cmd.err } func (cmd *XMessageSliceCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) (err error) { cmd.val, err = readXMessageSlice(rd) return err } func readXMessageSlice(rd *proto.Reader) ([]XMessage, error) { n, err := rd.ReadArrayLen() if err != nil { return nil, err } msgs := make([]XMessage, n) for i := 0; i < len(msgs); i++ { if msgs[i], err = readXMessage(rd); err != nil { return nil, err } } return msgs, nil } func readXMessage(rd *proto.Reader) (XMessage, error) { if err := rd.ReadFixedArrayLen(2); err != nil { return XMessage{}, err } id, err := rd.ReadString() if err != nil { return XMessage{}, err } v, err := stringInterfaceMapParser(rd) if err != nil { if err != proto.Nil { return XMessage{}, err } } return XMessage{ ID: id, Values: v, }, nil } func stringInterfaceMapParser(rd *proto.Reader) (map[string]interface{}, error) { n, err := rd.ReadMapLen() if err != nil { return nil, err } m := make(map[string]interface{}, n) for i := 0; i < n; i++ { key, err := rd.ReadString() if err != nil { return nil, err } value, err := rd.ReadString() if err != nil { return nil, err } m[key] = value } return m, nil } //------------------------------------------------------------------------------ type XStream struct { Stream string Messages []XMessage } type XStreamSliceCmd struct { baseCmd val []XStream } var _ Cmder = (*XStreamSliceCmd)(nil) func NewXStreamSliceCmd(ctx context.Context, args ...interface{}) *XStreamSliceCmd { return &XStreamSliceCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *XStreamSliceCmd) SetVal(val []XStream) { cmd.val = val } func (cmd *XStreamSliceCmd) Val() []XStream { return cmd.val } func (cmd *XStreamSliceCmd) Result() ([]XStream, error) { return cmd.val, cmd.err } func (cmd *XStreamSliceCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error { typ, err := rd.PeekReplyType() if err != nil { return err } var n int if typ == proto.RespMap { n, err = rd.ReadMapLen() } else { n, err = 
rd.ReadArrayLen() } if err != nil { return err } cmd.val = make([]XStream, n) for i := 0; i < len(cmd.val); i++ { if typ != proto.RespMap { if err = rd.ReadFixedArrayLen(2); err != nil { return err } } if cmd.val[i].Stream, err = rd.ReadString(); err != nil { return err } if cmd.val[i].Messages, err = readXMessageSlice(rd); err != nil { return err } } return nil } //------------------------------------------------------------------------------ type XPending struct { Count int64 Lower string Higher string Consumers map[string]int64 } type XPendingCmd struct { baseCmd val *XPending } var _ Cmder = (*XPendingCmd)(nil) func NewXPendingCmd(ctx context.Context, args ...interface{}) *XPendingCmd { return &XPendingCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *XPendingCmd) SetVal(val *XPending) { cmd.val = val } func (cmd *XPendingCmd) Val() *XPending { return cmd.val } func (cmd *XPendingCmd) Result() (*XPending, error) { return cmd.val, cmd.err } func (cmd *XPendingCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *XPendingCmd) readReply(rd *proto.Reader) error { var err error if err = rd.ReadFixedArrayLen(4); err != nil { return err } cmd.val = &XPending{} if cmd.val.Count, err = rd.ReadInt(); err != nil { return err } if cmd.val.Lower, err = rd.ReadString(); err != nil && err != Nil { return err } if cmd.val.Higher, err = rd.ReadString(); err != nil && err != Nil { return err } n, err := rd.ReadArrayLen() if err != nil && err != Nil { return err } cmd.val.Consumers = make(map[string]int64, n) for i := 0; i < n; i++ { if err = rd.ReadFixedArrayLen(2); err != nil { return err } consumerName, err := rd.ReadString() if err != nil { return err } consumerPending, err := rd.ReadInt() if err != nil { return err } cmd.val.Consumers[consumerName] = consumerPending } return nil } //------------------------------------------------------------------------------ type XPendingExt struct { ID string Consumer string Idle time.Duration RetryCount int64 } type XPendingExtCmd struct { baseCmd val []XPendingExt } var _ Cmder = (*XPendingExtCmd)(nil) func NewXPendingExtCmd(ctx context.Context, args ...interface{}) *XPendingExtCmd { return &XPendingExtCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *XPendingExtCmd) SetVal(val []XPendingExt) { cmd.val = val } func (cmd *XPendingExtCmd) Val() []XPendingExt { return cmd.val } func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) { return cmd.val, cmd.err } func (cmd *XPendingExtCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error { n, err := rd.ReadArrayLen() if err != nil { return err } cmd.val = make([]XPendingExt, n) for i := 0; i < len(cmd.val); i++ { if err = rd.ReadFixedArrayLen(4); err != nil { return err } if cmd.val[i].ID, err = rd.ReadString(); err != nil { return err } if cmd.val[i].Consumer, err = rd.ReadString(); err != nil && err != Nil { return err } idle, err := rd.ReadInt() if err != nil && err != Nil { return err } cmd.val[i].Idle = time.Duration(idle) * time.Millisecond if cmd.val[i].RetryCount, err = rd.ReadInt(); err != nil && err != Nil { return err } } return nil } //------------------------------------------------------------------------------ type XAutoClaimCmd struct { baseCmd start string val []XMessage } var _ Cmder = (*XAutoClaimCmd)(nil) func NewXAutoClaimCmd(ctx context.Context, args ...interface{}) *XAutoClaimCmd { return &XAutoClaimCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *XAutoClaimCmd) SetVal(val 
[]XMessage, start string) { cmd.val = val cmd.start = start } func (cmd *XAutoClaimCmd) Val() (messages []XMessage, start string) { return cmd.val, cmd.start } func (cmd *XAutoClaimCmd) Result() (messages []XMessage, start string, err error) { return cmd.val, cmd.start, cmd.err } func (cmd *XAutoClaimCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *XAutoClaimCmd) readReply(rd *proto.Reader) error { n, err := rd.ReadArrayLen() if err != nil { return err } switch n { case 2, // Redis 6 3: // Redis 7: // ok default: return fmt.Errorf("redis: got %d elements in XAutoClaim reply, wanted 2/3", n) } cmd.start, err = rd.ReadString() if err != nil { return err } cmd.val, err = readXMessageSlice(rd) if err != nil { return err } if n >= 3 { if err := rd.DiscardNext(); err != nil { return err } } return nil } //------------------------------------------------------------------------------ type XAutoClaimJustIDCmd struct { baseCmd start string val []string } var _ Cmder = (*XAutoClaimJustIDCmd)(nil) func NewXAutoClaimJustIDCmd(ctx context.Context, args ...interface{}) *XAutoClaimJustIDCmd { return &XAutoClaimJustIDCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *XAutoClaimJustIDCmd) SetVal(val []string, start string) { cmd.val = val cmd.start = start } func (cmd *XAutoClaimJustIDCmd) Val() (ids []string, start string) { return cmd.val, cmd.start } func (cmd *XAutoClaimJustIDCmd) Result() (ids []string, start string, err error) { return cmd.val, cmd.start, cmd.err } func (cmd *XAutoClaimJustIDCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *XAutoClaimJustIDCmd) readReply(rd *proto.Reader) error { n, err := rd.ReadArrayLen() if err != nil { return err } switch n { case 2, // Redis 6 3: // Redis 7: // ok default: return fmt.Errorf("redis: got %d elements in XAutoClaimJustID reply, wanted 2/3", n) } cmd.start, err = rd.ReadString() if err != nil { return err } nn, err := rd.ReadArrayLen() if err != nil { return err } cmd.val = make([]string, nn) for i := 0; i < nn; i++ { cmd.val[i], err = rd.ReadString() if err != nil { return err } } if n >= 3 { if err := rd.DiscardNext(); err != nil { return err } } return nil } //------------------------------------------------------------------------------ type XInfoConsumersCmd struct { baseCmd val []XInfoConsumer } type XInfoConsumer struct { Name string Pending int64 Idle time.Duration Inactive time.Duration } var _ Cmder = (*XInfoConsumersCmd)(nil) func NewXInfoConsumersCmd(ctx context.Context, stream string, group string) *XInfoConsumersCmd { return &XInfoConsumersCmd{ baseCmd: baseCmd{ ctx: ctx, args: []interface{}{"xinfo", "consumers", stream, group}, }, } } func (cmd *XInfoConsumersCmd) SetVal(val []XInfoConsumer) { cmd.val = val } func (cmd *XInfoConsumersCmd) Val() []XInfoConsumer { return cmd.val } func (cmd *XInfoConsumersCmd) Result() ([]XInfoConsumer, error) { return cmd.val, cmd.err } func (cmd *XInfoConsumersCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *XInfoConsumersCmd) readReply(rd *proto.Reader) error { n, err := rd.ReadArrayLen() if err != nil { return err } cmd.val = make([]XInfoConsumer, n) for i := 0; i < len(cmd.val); i++ { nn, err := rd.ReadMapLen() if err != nil { return err } var key string for f := 0; f < nn; f++ { key, err = rd.ReadString() if err != nil { return err } switch key { case "name": cmd.val[i].Name, err = rd.ReadString() case "pending": cmd.val[i].Pending, err = rd.ReadInt() case "idle": var idle int64 idle, err = rd.ReadInt() cmd.val[i].Idle = 
time.Duration(idle) * time.Millisecond case "inactive": var inactive int64 inactive, err = rd.ReadInt() cmd.val[i].Inactive = time.Duration(inactive) * time.Millisecond default: return fmt.Errorf("redis: unexpected content %s in XINFO CONSUMERS reply", key) } if err != nil { return err } } } return nil } //------------------------------------------------------------------------------ type XInfoGroupsCmd struct { baseCmd val []XInfoGroup } type XInfoGroup struct { Name string Consumers int64 Pending int64 LastDeliveredID string EntriesRead int64 // Lag represents the number of pending messages in the stream not yet // delivered to this consumer group. Returns -1 when the lag cannot be determined. Lag int64 } var _ Cmder = (*XInfoGroupsCmd)(nil) func NewXInfoGroupsCmd(ctx context.Context, stream string) *XInfoGroupsCmd { return &XInfoGroupsCmd{ baseCmd: baseCmd{ ctx: ctx, args: []interface{}{"xinfo", "groups", stream}, }, } } func (cmd *XInfoGroupsCmd) SetVal(val []XInfoGroup) { cmd.val = val } func (cmd *XInfoGroupsCmd) Val() []XInfoGroup { return cmd.val } func (cmd *XInfoGroupsCmd) Result() ([]XInfoGroup, error) { return cmd.val, cmd.err } func (cmd *XInfoGroupsCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error { n, err := rd.ReadArrayLen() if err != nil { return err } cmd.val = make([]XInfoGroup, n) for i := 0; i < len(cmd.val); i++ { group := &cmd.val[i] nn, err := rd.ReadMapLen() if err != nil { return err } var key string for j := 0; j < nn; j++ { key, err = rd.ReadString() if err != nil { return err } switch key { case "name": group.Name, err = rd.ReadString() if err != nil { return err } case "consumers": group.Consumers, err = rd.ReadInt() if err != nil { return err } case "pending": group.Pending, err = rd.ReadInt() if err != nil { return err } case "last-delivered-id": group.LastDeliveredID, err = rd.ReadString() if err != nil { return err } case "entries-read": group.EntriesRead, err = rd.ReadInt() if err != nil && err != Nil { return err } case "lag": group.Lag, err = rd.ReadInt() // lag: the number of entries in the stream that are still waiting to be delivered // to the group's consumers, or a NULL(Nil) when that number can't be determined. // In that case, we return -1. 
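// (Informational note, per the XINFO GROUPS documentation: Redis reports a nil lag when it
// cannot compute the value exactly, e.g. after entries were trimmed or deleted from the
// stream, or the group's last-delivered-id was changed arbitrarily.)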
if err != nil && err != Nil { return err } else if err == Nil { group.Lag = -1 } default: return fmt.Errorf("redis: unexpected key %q in XINFO GROUPS reply", key) } } } return nil } //------------------------------------------------------------------------------ type XInfoStreamCmd struct { baseCmd val *XInfoStream } type XInfoStream struct { Length int64 RadixTreeKeys int64 RadixTreeNodes int64 Groups int64 LastGeneratedID string MaxDeletedEntryID string EntriesAdded int64 FirstEntry XMessage LastEntry XMessage RecordedFirstEntryID string } var _ Cmder = (*XInfoStreamCmd)(nil) func NewXInfoStreamCmd(ctx context.Context, stream string) *XInfoStreamCmd { return &XInfoStreamCmd{ baseCmd: baseCmd{ ctx: ctx, args: []interface{}{"xinfo", "stream", stream}, }, } } func (cmd *XInfoStreamCmd) SetVal(val *XInfoStream) { cmd.val = val } func (cmd *XInfoStreamCmd) Val() *XInfoStream { return cmd.val } func (cmd *XInfoStreamCmd) Result() (*XInfoStream, error) { return cmd.val, cmd.err } func (cmd *XInfoStreamCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *XInfoStreamCmd) readReply(rd *proto.Reader) error { n, err := rd.ReadMapLen() if err != nil { return err } cmd.val = &XInfoStream{} for i := 0; i < n; i++ { key, err := rd.ReadString() if err != nil { return err } switch key { case "length": cmd.val.Length, err = rd.ReadInt() if err != nil { return err } case "radix-tree-keys": cmd.val.RadixTreeKeys, err = rd.ReadInt() if err != nil { return err } case "radix-tree-nodes": cmd.val.RadixTreeNodes, err = rd.ReadInt() if err != nil { return err } case "groups": cmd.val.Groups, err = rd.ReadInt() if err != nil { return err } case "last-generated-id": cmd.val.LastGeneratedID, err = rd.ReadString() if err != nil { return err } case "max-deleted-entry-id": cmd.val.MaxDeletedEntryID, err = rd.ReadString() if err != nil { return err } case "entries-added": cmd.val.EntriesAdded, err = rd.ReadInt() if err != nil { return err } case "first-entry": cmd.val.FirstEntry, err = readXMessage(rd) if err != nil && err != Nil { return err } case "last-entry": cmd.val.LastEntry, err = readXMessage(rd) if err != nil && err != Nil { return err } case "recorded-first-entry-id": cmd.val.RecordedFirstEntryID, err = rd.ReadString() if err != nil { return err } default: return fmt.Errorf("redis: unexpected key %q in XINFO STREAM reply", key) } } return nil } //------------------------------------------------------------------------------ type XInfoStreamFullCmd struct { baseCmd val *XInfoStreamFull } type XInfoStreamFull struct { Length int64 RadixTreeKeys int64 RadixTreeNodes int64 LastGeneratedID string MaxDeletedEntryID string EntriesAdded int64 Entries []XMessage Groups []XInfoStreamGroup RecordedFirstEntryID string } type XInfoStreamGroup struct { Name string LastDeliveredID string EntriesRead int64 Lag int64 PelCount int64 Pending []XInfoStreamGroupPending Consumers []XInfoStreamConsumer } type XInfoStreamGroupPending struct { ID string Consumer string DeliveryTime time.Time DeliveryCount int64 } type XInfoStreamConsumer struct { Name string SeenTime time.Time ActiveTime time.Time PelCount int64 Pending []XInfoStreamConsumerPending } type XInfoStreamConsumerPending struct { ID string DeliveryTime time.Time DeliveryCount int64 } var _ Cmder = (*XInfoStreamFullCmd)(nil) func NewXInfoStreamFullCmd(ctx context.Context, args ...interface{}) *XInfoStreamFullCmd { return &XInfoStreamFullCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *XInfoStreamFullCmd) SetVal(val *XInfoStreamFull) { cmd.val = val 
} func (cmd *XInfoStreamFullCmd) Val() *XInfoStreamFull { return cmd.val } func (cmd *XInfoStreamFullCmd) Result() (*XInfoStreamFull, error) { return cmd.val, cmd.err } func (cmd *XInfoStreamFullCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *XInfoStreamFullCmd) readReply(rd *proto.Reader) error { n, err := rd.ReadMapLen() if err != nil { return err } cmd.val = &XInfoStreamFull{} for i := 0; i < n; i++ { key, err := rd.ReadString() if err != nil { return err } switch key { case "length": cmd.val.Length, err = rd.ReadInt() if err != nil { return err } case "radix-tree-keys": cmd.val.RadixTreeKeys, err = rd.ReadInt() if err != nil { return err } case "radix-tree-nodes": cmd.val.RadixTreeNodes, err = rd.ReadInt() if err != nil { return err } case "last-generated-id": cmd.val.LastGeneratedID, err = rd.ReadString() if err != nil { return err } case "entries-added": cmd.val.EntriesAdded, err = rd.ReadInt() if err != nil { return err } case "entries": cmd.val.Entries, err = readXMessageSlice(rd) if err != nil { return err } case "groups": cmd.val.Groups, err = readStreamGroups(rd) if err != nil { return err } case "max-deleted-entry-id": cmd.val.MaxDeletedEntryID, err = rd.ReadString() if err != nil { return err } case "recorded-first-entry-id": cmd.val.RecordedFirstEntryID, err = rd.ReadString() if err != nil { return err } default: return fmt.Errorf("redis: unexpected key %q in XINFO STREAM FULL reply", key) } } return nil } func readStreamGroups(rd *proto.Reader) ([]XInfoStreamGroup, error) { n, err := rd.ReadArrayLen() if err != nil { return nil, err } groups := make([]XInfoStreamGroup, 0, n) for i := 0; i < n; i++ { nn, err := rd.ReadMapLen() if err != nil { return nil, err } group := XInfoStreamGroup{} for j := 0; j < nn; j++ { key, err := rd.ReadString() if err != nil { return nil, err } switch key { case "name": group.Name, err = rd.ReadString() if err != nil { return nil, err } case "last-delivered-id": group.LastDeliveredID, err = rd.ReadString() if err != nil { return nil, err } case "entries-read": group.EntriesRead, err = rd.ReadInt() if err != nil && err != Nil { return nil, err } case "lag": // lag: the number of entries in the stream that are still waiting to be delivered // to the group's consumers, or a NULL(Nil) when that number can't be determined. 
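// Unlike XInfoGroupsCmd.readReply above, a nil lag here simply leaves Lag at its zero value.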
group.Lag, err = rd.ReadInt() if err != nil && err != Nil { return nil, err } case "pel-count": group.PelCount, err = rd.ReadInt() if err != nil { return nil, err } case "pending": group.Pending, err = readXInfoStreamGroupPending(rd) if err != nil { return nil, err } case "consumers": group.Consumers, err = readXInfoStreamConsumers(rd) if err != nil { return nil, err } default: return nil, fmt.Errorf("redis: unexpected key %q in XINFO STREAM FULL reply", key) } } groups = append(groups, group) } return groups, nil } func readXInfoStreamGroupPending(rd *proto.Reader) ([]XInfoStreamGroupPending, error) { n, err := rd.ReadArrayLen() if err != nil { return nil, err } pending := make([]XInfoStreamGroupPending, 0, n) for i := 0; i < n; i++ { if err = rd.ReadFixedArrayLen(4); err != nil { return nil, err } p := XInfoStreamGroupPending{} p.ID, err = rd.ReadString() if err != nil { return nil, err } p.Consumer, err = rd.ReadString() if err != nil { return nil, err } delivery, err := rd.ReadInt() if err != nil { return nil, err } p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond)) p.DeliveryCount, err = rd.ReadInt() if err != nil { return nil, err } pending = append(pending, p) } return pending, nil } func readXInfoStreamConsumers(rd *proto.Reader) ([]XInfoStreamConsumer, error) { n, err := rd.ReadArrayLen() if err != nil { return nil, err } consumers := make([]XInfoStreamConsumer, 0, n) for i := 0; i < n; i++ { nn, err := rd.ReadMapLen() if err != nil { return nil, err } c := XInfoStreamConsumer{} for f := 0; f < nn; f++ { cKey, err := rd.ReadString() if err != nil { return nil, err } switch cKey { case "name": c.Name, err = rd.ReadString() case "seen-time": seen, err := rd.ReadInt() if err != nil { return nil, err } c.SeenTime = time.UnixMilli(seen) case "active-time": active, err := rd.ReadInt() if err != nil { return nil, err } c.ActiveTime = time.UnixMilli(active) case "pel-count": c.PelCount, err = rd.ReadInt() case "pending": pendingNumber, err := rd.ReadArrayLen() if err != nil { return nil, err } c.Pending = make([]XInfoStreamConsumerPending, 0, pendingNumber) for pn := 0; pn < pendingNumber; pn++ { if err = rd.ReadFixedArrayLen(3); err != nil { return nil, err } p := XInfoStreamConsumerPending{} p.ID, err = rd.ReadString() if err != nil { return nil, err } delivery, err := rd.ReadInt() if err != nil { return nil, err } p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond)) p.DeliveryCount, err = rd.ReadInt() if err != nil { return nil, err } c.Pending = append(c.Pending, p) } default: return nil, fmt.Errorf("redis: unexpected content %s "+ "in XINFO STREAM FULL reply", cKey) } if err != nil { return nil, err } } consumers = append(consumers, c) } return consumers, nil } //------------------------------------------------------------------------------ type ZSliceCmd struct { baseCmd val []Z } var _ Cmder = (*ZSliceCmd)(nil) func NewZSliceCmd(ctx context.Context, args ...interface{}) *ZSliceCmd { return &ZSliceCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *ZSliceCmd) SetVal(val []Z) { cmd.val = val } func (cmd *ZSliceCmd) Val() []Z { return cmd.val } func (cmd *ZSliceCmd) Result() ([]Z, error) { return cmd.val, cmd.err } func (cmd *ZSliceCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error { // nolint:dupl n, err := rd.ReadArrayLen() if err != nil { return err } // If the n is 0, can't continue reading. 
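// (With no elements left for this reply, PeekReplyType below would look at bytes belonging
// to the next reply, so return an empty slice immediately.)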
if n == 0 { cmd.val = make([]Z, 0) return nil } typ, err := rd.PeekReplyType() if err != nil { return err } array := typ == proto.RespArray if array { cmd.val = make([]Z, n) } else { cmd.val = make([]Z, n/2) } for i := 0; i < len(cmd.val); i++ { if array { if err = rd.ReadFixedArrayLen(2); err != nil { return err } } if cmd.val[i].Member, err = rd.ReadString(); err != nil { return err } if cmd.val[i].Score, err = rd.ReadFloat(); err != nil { return err } } return nil } //------------------------------------------------------------------------------ type ZWithKeyCmd struct { baseCmd val *ZWithKey } var _ Cmder = (*ZWithKeyCmd)(nil) func NewZWithKeyCmd(ctx context.Context, args ...interface{}) *ZWithKeyCmd { return &ZWithKeyCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *ZWithKeyCmd) SetVal(val *ZWithKey) { cmd.val = val } func (cmd *ZWithKeyCmd) Val() *ZWithKey { return cmd.val } func (cmd *ZWithKeyCmd) Result() (*ZWithKey, error) { return cmd.val, cmd.err } func (cmd *ZWithKeyCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) (err error) { if err = rd.ReadFixedArrayLen(3); err != nil { return err } cmd.val = &ZWithKey{} if cmd.val.Key, err = rd.ReadString(); err != nil { return err } if cmd.val.Member, err = rd.ReadString(); err != nil { return err } if cmd.val.Score, err = rd.ReadFloat(); err != nil { return err } return nil } //------------------------------------------------------------------------------ type ScanCmd struct { baseCmd page []string cursor uint64 process cmdable } var _ Cmder = (*ScanCmd)(nil) func NewScanCmd(ctx context.Context, process cmdable, args ...interface{}) *ScanCmd { return &ScanCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, process: process, } } func (cmd *ScanCmd) SetVal(page []string, cursor uint64) { cmd.page = page cmd.cursor = cursor } func (cmd *ScanCmd) Val() (keys []string, cursor uint64) { return cmd.page, cmd.cursor } func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) { return cmd.page, cmd.cursor, cmd.err } func (cmd *ScanCmd) String() string { return cmdString(cmd, cmd.page) } func (cmd *ScanCmd) readReply(rd *proto.Reader) error { if err := rd.ReadFixedArrayLen(2); err != nil { return err } cursor, err := rd.ReadUint() if err != nil { return err } cmd.cursor = cursor n, err := rd.ReadArrayLen() if err != nil { return err } cmd.page = make([]string, n) for i := 0; i < len(cmd.page); i++ { if cmd.page[i], err = rd.ReadString(); err != nil { return err } } return nil } // Iterator creates a new ScanIterator. 
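//
// Illustrative usage sketch (rdb is an assumed *Client; the "prefix:*" pattern and the
// zero count are placeholders, not part of this method's contract):
//
//	iter := rdb.Scan(ctx, 0, "prefix:*", 0).Iterator()
//	for iter.Next(ctx) {
//		fmt.Println("key:", iter.Val())
//	}
//	if err := iter.Err(); err != nil {
//		panic(err)
//	}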
func (cmd *ScanCmd) Iterator() *ScanIterator {
	return &ScanIterator{
		cmd: cmd,
	}
}

//------------------------------------------------------------------------------

type ClusterNode struct {
	ID                 string
	Addr               string
	NetworkingMetadata map[string]string
}

type ClusterSlot struct {
	Start int
	End   int
	Nodes []ClusterNode
}

type ClusterSlotsCmd struct {
	baseCmd

	val []ClusterSlot
}

var _ Cmder = (*ClusterSlotsCmd)(nil)

func NewClusterSlotsCmd(ctx context.Context, args ...interface{}) *ClusterSlotsCmd {
	return &ClusterSlotsCmd{
		baseCmd: baseCmd{
			ctx:  ctx,
			args: args,
		},
	}
}

func (cmd *ClusterSlotsCmd) SetVal(val []ClusterSlot) {
	cmd.val = val
}

func (cmd *ClusterSlotsCmd) Val() []ClusterSlot {
	return cmd.val
}

func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) {
	return cmd.val, cmd.err
}

func (cmd *ClusterSlotsCmd) String() string {
	return cmdString(cmd, cmd.val)
}

func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error {
	n, err := rd.ReadArrayLen()
	if err != nil {
		return err
	}
	cmd.val = make([]ClusterSlot, n)

	for i := 0; i < len(cmd.val); i++ {
		n, err = rd.ReadArrayLen()
		if err != nil {
			return err
		}
		if n < 2 {
			return fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n)
		}

		start, err := rd.ReadInt()
		if err != nil {
			return err
		}

		end, err := rd.ReadInt()
		if err != nil {
			return err
		}

		// Subtract the two slot-range numbers (start, end); the remaining elements describe nodes.
		nodes := make([]ClusterNode, n-2)
		for j := 0; j < len(nodes); j++ {
			nn, err := rd.ReadArrayLen()
			if err != nil {
				return err
			}
			if nn < 2 || nn > 4 {
				return fmt.Errorf("redis: got %d elements in cluster info address, expected 2, 3, or 4", nn)
			}

			ip, err := rd.ReadString()
			if err != nil {
				return err
			}

			port, err := rd.ReadString()
			if err != nil {
				return err
			}

			nodes[j].Addr = net.JoinHostPort(ip, port)

			if nn >= 3 {
				id, err := rd.ReadString()
				if err != nil {
					return err
				}
				nodes[j].ID = id
			}

			if nn >= 4 {
				metadataLength, err := rd.ReadMapLen()
				if err != nil {
					return err
				}

				networkingMetadata := make(map[string]string, metadataLength)

				for i := 0; i < metadataLength; i++ {
					key, err := rd.ReadString()
					if err != nil {
						return err
					}
					value, err := rd.ReadString()
					if err != nil {
						return err
					}
					networkingMetadata[key] = value
				}

				nodes[j].NetworkingMetadata = networkingMetadata
			}
		}

		cmd.val[i] = ClusterSlot{
			Start: int(start),
			End:   int(end),
			Nodes: nodes,
		}
	}

	return nil
}

//------------------------------------------------------------------------------

// GeoLocation is used with GeoAdd to add geospatial location.
type GeoLocation struct {
	Name                      string
	Longitude, Latitude, Dist float64
	GeoHash                   int64
}

// GeoRadiusQuery is used with GeoRadius to query geospatial index.
type GeoRadiusQuery struct {
	Radius float64
	// Can be m, km, ft, or mi. Default is km.
	Unit        string
	WithCoord   bool
	WithDist    bool
	WithGeoHash bool
	Count       int
	// Can be ASC or DESC. Default is no sort order.
Sort string Store string StoreDist string // WithCoord+WithDist+WithGeoHash withLen int } type GeoLocationCmd struct { baseCmd q *GeoRadiusQuery locations []GeoLocation } var _ Cmder = (*GeoLocationCmd)(nil) func NewGeoLocationCmd(ctx context.Context, q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd { return &GeoLocationCmd{ baseCmd: baseCmd{ ctx: ctx, args: geoLocationArgs(q, args...), }, q: q, } } func geoLocationArgs(q *GeoRadiusQuery, args ...interface{}) []interface{} { args = append(args, q.Radius) if q.Unit != "" { args = append(args, q.Unit) } else { args = append(args, "km") } if q.WithCoord { args = append(args, "withcoord") q.withLen++ } if q.WithDist { args = append(args, "withdist") q.withLen++ } if q.WithGeoHash { args = append(args, "withhash") q.withLen++ } if q.Count > 0 { args = append(args, "count", q.Count) } if q.Sort != "" { args = append(args, q.Sort) } if q.Store != "" { args = append(args, "store") args = append(args, q.Store) } if q.StoreDist != "" { args = append(args, "storedist") args = append(args, q.StoreDist) } return args } func (cmd *GeoLocationCmd) SetVal(locations []GeoLocation) { cmd.locations = locations } func (cmd *GeoLocationCmd) Val() []GeoLocation { return cmd.locations } func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) { return cmd.locations, cmd.err } func (cmd *GeoLocationCmd) String() string { return cmdString(cmd, cmd.locations) } func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error { n, err := rd.ReadArrayLen() if err != nil { return err } cmd.locations = make([]GeoLocation, n) for i := 0; i < len(cmd.locations); i++ { // only name if cmd.q.withLen == 0 { if cmd.locations[i].Name, err = rd.ReadString(); err != nil { return err } continue } // +name if err = rd.ReadFixedArrayLen(cmd.q.withLen + 1); err != nil { return err } if cmd.locations[i].Name, err = rd.ReadString(); err != nil { return err } if cmd.q.WithDist { if cmd.locations[i].Dist, err = rd.ReadFloat(); err != nil { return err } } if cmd.q.WithGeoHash { if cmd.locations[i].GeoHash, err = rd.ReadInt(); err != nil { return err } } if cmd.q.WithCoord { if err = rd.ReadFixedArrayLen(2); err != nil { return err } if cmd.locations[i].Longitude, err = rd.ReadFloat(); err != nil { return err } if cmd.locations[i].Latitude, err = rd.ReadFloat(); err != nil { return err } } } return nil } //------------------------------------------------------------------------------ // GeoSearchQuery is used for GEOSearch/GEOSearchStore command query. type GeoSearchQuery struct { Member string // Latitude and Longitude when using FromLonLat option. Longitude float64 Latitude float64 // Distance and unit when using ByRadius option. // Can use m, km, ft, or mi. Default is km. Radius float64 RadiusUnit string // Height, width and unit when using ByBox option. // Can be m, km, ft, or mi. Default is km. BoxWidth float64 BoxHeight float64 BoxUnit string // Can be ASC or DESC. Default is no sort order. Sort string Count int CountAny bool } type GeoSearchLocationQuery struct { GeoSearchQuery WithCoord bool WithDist bool WithHash bool } type GeoSearchStoreQuery struct { GeoSearchQuery // When using the StoreDist option, the command stores the items in a // sorted set populated with their distance from the center of the circle or box, // as a floating-point number, in the same unit specified for that shape. 
StoreDist bool } func geoSearchLocationArgs(q *GeoSearchLocationQuery, args []interface{}) []interface{} { args = geoSearchArgs(&q.GeoSearchQuery, args) if q.WithCoord { args = append(args, "withcoord") } if q.WithDist { args = append(args, "withdist") } if q.WithHash { args = append(args, "withhash") } return args } func geoSearchArgs(q *GeoSearchQuery, args []interface{}) []interface{} { if q.Member != "" { args = append(args, "frommember", q.Member) } else { args = append(args, "fromlonlat", q.Longitude, q.Latitude) } if q.Radius > 0 { if q.RadiusUnit == "" { q.RadiusUnit = "km" } args = append(args, "byradius", q.Radius, q.RadiusUnit) } else { if q.BoxUnit == "" { q.BoxUnit = "km" } args = append(args, "bybox", q.BoxWidth, q.BoxHeight, q.BoxUnit) } if q.Sort != "" { args = append(args, q.Sort) } if q.Count > 0 { args = append(args, "count", q.Count) if q.CountAny { args = append(args, "any") } } return args } type GeoSearchLocationCmd struct { baseCmd opt *GeoSearchLocationQuery val []GeoLocation } var _ Cmder = (*GeoSearchLocationCmd)(nil) func NewGeoSearchLocationCmd( ctx context.Context, opt *GeoSearchLocationQuery, args ...interface{}, ) *GeoSearchLocationCmd { return &GeoSearchLocationCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, opt: opt, } } func (cmd *GeoSearchLocationCmd) SetVal(val []GeoLocation) { cmd.val = val } func (cmd *GeoSearchLocationCmd) Val() []GeoLocation { return cmd.val } func (cmd *GeoSearchLocationCmd) Result() ([]GeoLocation, error) { return cmd.val, cmd.err } func (cmd *GeoSearchLocationCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *GeoSearchLocationCmd) readReply(rd *proto.Reader) error { n, err := rd.ReadArrayLen() if err != nil { return err } cmd.val = make([]GeoLocation, n) for i := 0; i < n; i++ { _, err = rd.ReadArrayLen() if err != nil { return err } var loc GeoLocation loc.Name, err = rd.ReadString() if err != nil { return err } if cmd.opt.WithDist { loc.Dist, err = rd.ReadFloat() if err != nil { return err } } if cmd.opt.WithHash { loc.GeoHash, err = rd.ReadInt() if err != nil { return err } } if cmd.opt.WithCoord { if err = rd.ReadFixedArrayLen(2); err != nil { return err } loc.Longitude, err = rd.ReadFloat() if err != nil { return err } loc.Latitude, err = rd.ReadFloat() if err != nil { return err } } cmd.val[i] = loc } return nil } //------------------------------------------------------------------------------ type GeoPos struct { Longitude, Latitude float64 } type GeoPosCmd struct { baseCmd val []*GeoPos } var _ Cmder = (*GeoPosCmd)(nil) func NewGeoPosCmd(ctx context.Context, args ...interface{}) *GeoPosCmd { return &GeoPosCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *GeoPosCmd) SetVal(val []*GeoPos) { cmd.val = val } func (cmd *GeoPosCmd) Val() []*GeoPos { return cmd.val } func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) { return cmd.val, cmd.err } func (cmd *GeoPosCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error { n, err := rd.ReadArrayLen() if err != nil { return err } cmd.val = make([]*GeoPos, n) for i := 0; i < len(cmd.val); i++ { err = rd.ReadFixedArrayLen(2) if err != nil { if err == Nil { cmd.val[i] = nil continue } return err } longitude, err := rd.ReadFloat() if err != nil { return err } latitude, err := rd.ReadFloat() if err != nil { return err } cmd.val[i] = &GeoPos{ Longitude: longitude, Latitude: latitude, } } return nil } //------------------------------------------------------------------------------ type CommandInfo struct { 
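// Arity follows the COMMAND reply convention: a positive value is the exact number of
// arguments (command name included), while a negative value means "at least |Arity|".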
Name string Arity int8 Flags []string ACLFlags []string FirstKeyPos int8 LastKeyPos int8 StepCount int8 ReadOnly bool } type CommandsInfoCmd struct { baseCmd val map[string]*CommandInfo } var _ Cmder = (*CommandsInfoCmd)(nil) func NewCommandsInfoCmd(ctx context.Context, args ...interface{}) *CommandsInfoCmd { return &CommandsInfoCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *CommandsInfoCmd) SetVal(val map[string]*CommandInfo) { cmd.val = val } func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo { return cmd.val } func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) { return cmd.val, cmd.err } func (cmd *CommandsInfoCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error { const numArgRedis5 = 6 const numArgRedis6 = 7 const numArgRedis7 = 10 n, err := rd.ReadArrayLen() if err != nil { return err } cmd.val = make(map[string]*CommandInfo, n) for i := 0; i < n; i++ { nn, err := rd.ReadArrayLen() if err != nil { return err } switch nn { case numArgRedis5, numArgRedis6, numArgRedis7: // ok default: return fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 6/7/10", nn) } cmdInfo := &CommandInfo{} if cmdInfo.Name, err = rd.ReadString(); err != nil { return err } arity, err := rd.ReadInt() if err != nil { return err } cmdInfo.Arity = int8(arity) flagLen, err := rd.ReadArrayLen() if err != nil { return err } cmdInfo.Flags = make([]string, flagLen) for f := 0; f < len(cmdInfo.Flags); f++ { switch s, err := rd.ReadString(); { case err == Nil: cmdInfo.Flags[f] = "" case err != nil: return err default: if !cmdInfo.ReadOnly && s == "readonly" { cmdInfo.ReadOnly = true } cmdInfo.Flags[f] = s } } firstKeyPos, err := rd.ReadInt() if err != nil { return err } cmdInfo.FirstKeyPos = int8(firstKeyPos) lastKeyPos, err := rd.ReadInt() if err != nil { return err } cmdInfo.LastKeyPos = int8(lastKeyPos) stepCount, err := rd.ReadInt() if err != nil { return err } cmdInfo.StepCount = int8(stepCount) if nn >= numArgRedis6 { aclFlagLen, err := rd.ReadArrayLen() if err != nil { return err } cmdInfo.ACLFlags = make([]string, aclFlagLen) for f := 0; f < len(cmdInfo.ACLFlags); f++ { switch s, err := rd.ReadString(); { case err == Nil: cmdInfo.ACLFlags[f] = "" case err != nil: return err default: cmdInfo.ACLFlags[f] = s } } } if nn >= numArgRedis7 { if err := rd.DiscardNext(); err != nil { return err } if err := rd.DiscardNext(); err != nil { return err } if err := rd.DiscardNext(); err != nil { return err } } cmd.val[cmdInfo.Name] = cmdInfo } return nil } //------------------------------------------------------------------------------ type cmdsInfoCache struct { fn func(ctx context.Context) (map[string]*CommandInfo, error) once internal.Once cmds map[string]*CommandInfo } func newCmdsInfoCache(fn func(ctx context.Context) (map[string]*CommandInfo, error)) *cmdsInfoCache { return &cmdsInfoCache{ fn: fn, } } func (c *cmdsInfoCache) Get(ctx context.Context) (map[string]*CommandInfo, error) { err := c.once.Do(func() error { cmds, err := c.fn(ctx) if err != nil { return err } // Extensions have cmd names in upper case. Convert them to lower case. 
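// For example, a module command reported as "JSON.GET" also becomes reachable under the
// key "json.get"; built-in commands are already reported in lower case.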
for k, v := range cmds { lower := internal.ToLower(k) if lower != k { cmds[lower] = v } } c.cmds = cmds return nil }) return c.cmds, err } //------------------------------------------------------------------------------ type SlowLog struct { ID int64 Time time.Time Duration time.Duration Args []string // These are also optional fields emitted only by Redis 4.0 or greater: // https://redis.io/commands/slowlog#output-format ClientAddr string ClientName string } type SlowLogCmd struct { baseCmd val []SlowLog } var _ Cmder = (*SlowLogCmd)(nil) func NewSlowLogCmd(ctx context.Context, args ...interface{}) *SlowLogCmd { return &SlowLogCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *SlowLogCmd) SetVal(val []SlowLog) { cmd.val = val } func (cmd *SlowLogCmd) Val() []SlowLog { return cmd.val } func (cmd *SlowLogCmd) Result() ([]SlowLog, error) { return cmd.val, cmd.err } func (cmd *SlowLogCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *SlowLogCmd) readReply(rd *proto.Reader) error { n, err := rd.ReadArrayLen() if err != nil { return err } cmd.val = make([]SlowLog, n) for i := 0; i < len(cmd.val); i++ { nn, err := rd.ReadArrayLen() if err != nil { return err } if nn < 4 { return fmt.Errorf("redis: got %d elements in slowlog get, expected at least 4", nn) } if cmd.val[i].ID, err = rd.ReadInt(); err != nil { return err } createdAt, err := rd.ReadInt() if err != nil { return err } cmd.val[i].Time = time.Unix(createdAt, 0) costs, err := rd.ReadInt() if err != nil { return err } cmd.val[i].Duration = time.Duration(costs) * time.Microsecond cmdLen, err := rd.ReadArrayLen() if err != nil { return err } if cmdLen < 1 { return fmt.Errorf("redis: got %d elements commands reply in slowlog get, expected at least 1", cmdLen) } cmd.val[i].Args = make([]string, cmdLen) for f := 0; f < len(cmd.val[i].Args); f++ { cmd.val[i].Args[f], err = rd.ReadString() if err != nil { return err } } if nn >= 5 { if cmd.val[i].ClientAddr, err = rd.ReadString(); err != nil { return err } } if nn >= 6 { if cmd.val[i].ClientName, err = rd.ReadString(); err != nil { return err } } } return nil } //----------------------------------------------------------------------- type MapStringInterfaceCmd struct { baseCmd val map[string]interface{} } var _ Cmder = (*MapStringInterfaceCmd)(nil) func NewMapStringInterfaceCmd(ctx context.Context, args ...interface{}) *MapStringInterfaceCmd { return &MapStringInterfaceCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *MapStringInterfaceCmd) SetVal(val map[string]interface{}) { cmd.val = val } func (cmd *MapStringInterfaceCmd) Val() map[string]interface{} { return cmd.val } func (cmd *MapStringInterfaceCmd) Result() (map[string]interface{}, error) { return cmd.val, cmd.err } func (cmd *MapStringInterfaceCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *MapStringInterfaceCmd) readReply(rd *proto.Reader) error { n, err := rd.ReadMapLen() if err != nil { return err } cmd.val = make(map[string]interface{}, n) for i := 0; i < n; i++ { k, err := rd.ReadString() if err != nil { return err } v, err := rd.ReadReply() if err != nil { if err == Nil { cmd.val[k] = Nil continue } if err, ok := err.(proto.RedisError); ok { cmd.val[k] = err continue } return err } cmd.val[k] = v } return nil } //----------------------------------------------------------------------- type MapStringStringSliceCmd struct { baseCmd val []map[string]string } var _ Cmder = (*MapStringStringSliceCmd)(nil) func NewMapStringStringSliceCmd(ctx context.Context, args ...interface{}) 
*MapStringStringSliceCmd {
	return &MapStringStringSliceCmd{
		baseCmd: baseCmd{
			ctx:  ctx,
			args: args,
		},
	}
}

func (cmd *MapStringStringSliceCmd) SetVal(val []map[string]string) {
	cmd.val = val
}

func (cmd *MapStringStringSliceCmd) Val() []map[string]string {
	return cmd.val
}

func (cmd *MapStringStringSliceCmd) Result() ([]map[string]string, error) {
	return cmd.val, cmd.err
}

func (cmd *MapStringStringSliceCmd) String() string {
	return cmdString(cmd, cmd.val)
}

func (cmd *MapStringStringSliceCmd) readReply(rd *proto.Reader) error {
	n, err := rd.ReadArrayLen()
	if err != nil {
		return err
	}

	cmd.val = make([]map[string]string, n)
	for i := 0; i < n; i++ {
		nn, err := rd.ReadMapLen()
		if err != nil {
			return err
		}
		cmd.val[i] = make(map[string]string, nn)
		for f := 0; f < nn; f++ {
			k, err := rd.ReadString()
			if err != nil {
				return err
			}
			v, err := rd.ReadString()
			if err != nil {
				return err
			}
			cmd.val[i][k] = v
		}
	}
	return nil
}

// -----------------------------------------------------------------------
// MapMapStringInterfaceCmd represents a command that returns a map of strings to interface{}.
type MapMapStringInterfaceCmd struct {
	baseCmd

	val map[string]interface{}
}

func NewMapMapStringInterfaceCmd(ctx context.Context, args ...interface{}) *MapMapStringInterfaceCmd {
	return &MapMapStringInterfaceCmd{
		baseCmd: baseCmd{
			ctx:  ctx,
			args: args,
		},
	}
}

func (cmd *MapMapStringInterfaceCmd) String() string {
	return cmdString(cmd, cmd.val)
}

func (cmd *MapMapStringInterfaceCmd) SetVal(val map[string]interface{}) {
	cmd.val = val
}

func (cmd *MapMapStringInterfaceCmd) Result() (map[string]interface{}, error) {
	return cmd.val, cmd.err
}

func (cmd *MapMapStringInterfaceCmd) Val() map[string]interface{} {
	return cmd.val
}

// readReply will try to parse the reply from the proto.Reader for both resp2 and resp3
func (cmd *MapMapStringInterfaceCmd) readReply(rd *proto.Reader) (err error) {
	data, err := rd.ReadReply()
	if err != nil {
		return err
	}
	resultMap := map[string]interface{}{}

	switch midResponse := data.(type) {
	case map[interface{}]interface{}: // resp3 will return map
		for k, v := range midResponse {
			stringKey, ok := k.(string)
			if !ok {
				return fmt.Errorf("redis: invalid map key %#v", k)
			}
			resultMap[stringKey] = v
		}
	case []interface{}: // resp2 will return array of arrays
		n := len(midResponse)
		for i := 0; i < n; i++ {
			finalArr, ok := midResponse[i].([]interface{}) // final array that we need to transform to map
			if !ok {
				return fmt.Errorf("redis: unexpected response %#v", data)
			}
			m := len(finalArr)
			if m%2 != 0 { // a flattened map must hold an even number of elements (key/value pairs)
				return fmt.Errorf("redis: unexpected response %#v", data)
			}
			for j := 0; j < m; j += 2 {
				stringKey, ok := finalArr[j].(string) // the first one is the key
				if !ok {
					return fmt.Errorf("redis: invalid map key %#v", finalArr[j])
				}
				resultMap[stringKey] = finalArr[j+1] // second one is value
			}
		}
	default:
		return fmt.Errorf("redis: unexpected response %#v", data)
	}

	cmd.val = resultMap
	return nil
}

//-----------------------------------------------------------------------

type MapStringInterfaceSliceCmd struct {
	baseCmd

	val []map[string]interface{}
}

var _ Cmder = (*MapStringInterfaceSliceCmd)(nil)

func NewMapStringInterfaceSliceCmd(ctx context.Context, args ...interface{}) *MapStringInterfaceSliceCmd {
	return &MapStringInterfaceSliceCmd{
		baseCmd: baseCmd{
			ctx:  ctx,
			args: args,
		},
	}
}

func (cmd *MapStringInterfaceSliceCmd) SetVal(val []map[string]interface{}) {
	cmd.val = val
}

func (cmd *MapStringInterfaceSliceCmd) Val() []map[string]interface{} {
	return cmd.val
}

func (cmd
*MapStringInterfaceSliceCmd) Result() ([]map[string]interface{}, error) { return cmd.val, cmd.err } func (cmd *MapStringInterfaceSliceCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *MapStringInterfaceSliceCmd) readReply(rd *proto.Reader) error { n, err := rd.ReadArrayLen() if err != nil { return err } cmd.val = make([]map[string]interface{}, n) for i := 0; i < n; i++ { nn, err := rd.ReadMapLen() if err != nil { return err } cmd.val[i] = make(map[string]interface{}, nn) for f := 0; f < nn; f++ { k, err := rd.ReadString() if err != nil { return err } v, err := rd.ReadReply() if err != nil { if err != Nil { return err } } cmd.val[i][k] = v } } return nil } //------------------------------------------------------------------------------ type KeyValuesCmd struct { baseCmd key string val []string } var _ Cmder = (*KeyValuesCmd)(nil) func NewKeyValuesCmd(ctx context.Context, args ...interface{}) *KeyValuesCmd { return &KeyValuesCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *KeyValuesCmd) SetVal(key string, val []string) { cmd.key = key cmd.val = val } func (cmd *KeyValuesCmd) Val() (string, []string) { return cmd.key, cmd.val } func (cmd *KeyValuesCmd) Result() (string, []string, error) { return cmd.key, cmd.val, cmd.err } func (cmd *KeyValuesCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *KeyValuesCmd) readReply(rd *proto.Reader) (err error) { if err = rd.ReadFixedArrayLen(2); err != nil { return err } cmd.key, err = rd.ReadString() if err != nil { return err } n, err := rd.ReadArrayLen() if err != nil { return err } cmd.val = make([]string, n) for i := 0; i < n; i++ { cmd.val[i], err = rd.ReadString() if err != nil { return err } } return nil } //------------------------------------------------------------------------------ type ZSliceWithKeyCmd struct { baseCmd key string val []Z } var _ Cmder = (*ZSliceWithKeyCmd)(nil) func NewZSliceWithKeyCmd(ctx context.Context, args ...interface{}) *ZSliceWithKeyCmd { return &ZSliceWithKeyCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *ZSliceWithKeyCmd) SetVal(key string, val []Z) { cmd.key = key cmd.val = val } func (cmd *ZSliceWithKeyCmd) Val() (string, []Z) { return cmd.key, cmd.val } func (cmd *ZSliceWithKeyCmd) Result() (string, []Z, error) { return cmd.key, cmd.val, cmd.err } func (cmd *ZSliceWithKeyCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *ZSliceWithKeyCmd) readReply(rd *proto.Reader) (err error) { if err = rd.ReadFixedArrayLen(2); err != nil { return err } cmd.key, err = rd.ReadString() if err != nil { return err } n, err := rd.ReadArrayLen() if err != nil { return err } typ, err := rd.PeekReplyType() if err != nil { return err } array := typ == proto.RespArray if array { cmd.val = make([]Z, n) } else { cmd.val = make([]Z, n/2) } for i := 0; i < len(cmd.val); i++ { if array { if err = rd.ReadFixedArrayLen(2); err != nil { return err } } if cmd.val[i].Member, err = rd.ReadString(); err != nil { return err } if cmd.val[i].Score, err = rd.ReadFloat(); err != nil { return err } } return nil } type Function struct { Name string Description string Flags []string } type Library struct { Name string Engine string Functions []Function Code string } type FunctionListCmd struct { baseCmd val []Library } var _ Cmder = (*FunctionListCmd)(nil) func NewFunctionListCmd(ctx context.Context, args ...interface{}) *FunctionListCmd { return &FunctionListCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *FunctionListCmd) SetVal(val []Library) { cmd.val = val } func 
(cmd *FunctionListCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *FunctionListCmd) Val() []Library { return cmd.val } func (cmd *FunctionListCmd) Result() ([]Library, error) { return cmd.val, cmd.err } func (cmd *FunctionListCmd) First() (*Library, error) { if cmd.err != nil { return nil, cmd.err } if len(cmd.val) > 0 { return &cmd.val[0], nil } return nil, Nil } func (cmd *FunctionListCmd) readReply(rd *proto.Reader) (err error) { n, err := rd.ReadArrayLen() if err != nil { return err } libraries := make([]Library, n) for i := 0; i < n; i++ { nn, err := rd.ReadMapLen() if err != nil { return err } library := Library{} for f := 0; f < nn; f++ { key, err := rd.ReadString() if err != nil { return err } switch key { case "library_name": library.Name, err = rd.ReadString() case "engine": library.Engine, err = rd.ReadString() case "functions": library.Functions, err = cmd.readFunctions(rd) case "library_code": library.Code, err = rd.ReadString() default: return fmt.Errorf("redis: function list unexpected key %s", key) } if err != nil { return err } } libraries[i] = library } cmd.val = libraries return nil } func (cmd *FunctionListCmd) readFunctions(rd *proto.Reader) ([]Function, error) { n, err := rd.ReadArrayLen() if err != nil { return nil, err } functions := make([]Function, n) for i := 0; i < n; i++ { nn, err := rd.ReadMapLen() if err != nil { return nil, err } function := Function{} for f := 0; f < nn; f++ { key, err := rd.ReadString() if err != nil { return nil, err } switch key { case "name": if function.Name, err = rd.ReadString(); err != nil { return nil, err } case "description": if function.Description, err = rd.ReadString(); err != nil && err != Nil { return nil, err } case "flags": // resp set nx, err := rd.ReadArrayLen() if err != nil { return nil, err } function.Flags = make([]string, nx) for j := 0; j < nx; j++ { if function.Flags[j], err = rd.ReadString(); err != nil { return nil, err } } default: return nil, fmt.Errorf("redis: function list unexpected key %s", key) } } functions[i] = function } return functions, nil } // FunctionStats contains information about the scripts currently executing on the server, and the available engines // - Engines: // Statistics about the engine like number of functions and number of libraries // - RunningScript: // The script currently running on the shard we're connecting to. // For Redis Enterprise and Redis Cloud, this represents the // function with the longest running time, across all the running functions, on all shards // - RunningScripts // All scripts currently running in a Redis Enterprise clustered database. // Only available on Redis Enterprise type FunctionStats struct { Engines []Engine isRunning bool rs RunningScript allrs []RunningScript } func (fs *FunctionStats) Running() bool { return fs.isRunning } func (fs *FunctionStats) RunningScript() (RunningScript, bool) { return fs.rs, fs.isRunning } // AllRunningScripts returns all scripts currently running in a Redis Enterprise clustered database. 
// Only available on Redis Enterprise func (fs *FunctionStats) AllRunningScripts() []RunningScript { return fs.allrs } type RunningScript struct { Name string Command []string Duration time.Duration } type Engine struct { Language string LibrariesCount int64 FunctionsCount int64 } type FunctionStatsCmd struct { baseCmd val FunctionStats } var _ Cmder = (*FunctionStatsCmd)(nil) func NewFunctionStatsCmd(ctx context.Context, args ...interface{}) *FunctionStatsCmd { return &FunctionStatsCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *FunctionStatsCmd) SetVal(val FunctionStats) { cmd.val = val } func (cmd *FunctionStatsCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *FunctionStatsCmd) Val() FunctionStats { return cmd.val } func (cmd *FunctionStatsCmd) Result() (FunctionStats, error) { return cmd.val, cmd.err } func (cmd *FunctionStatsCmd) readReply(rd *proto.Reader) (err error) { n, err := rd.ReadMapLen() if err != nil { return err } var key string var result FunctionStats for f := 0; f < n; f++ { key, err = rd.ReadString() if err != nil { return err } switch key { case "running_script": result.rs, result.isRunning, err = cmd.readRunningScript(rd) case "engines": result.Engines, err = cmd.readEngines(rd) case "all_running_scripts": // Redis Enterprise only result.allrs, result.isRunning, err = cmd.readRunningScripts(rd) default: return fmt.Errorf("redis: function stats unexpected key %s", key) } if err != nil { return err } } cmd.val = result return nil } func (cmd *FunctionStatsCmd) readRunningScript(rd *proto.Reader) (RunningScript, bool, error) { err := rd.ReadFixedMapLen(3) if err != nil { if err == Nil { return RunningScript{}, false, nil } return RunningScript{}, false, err } var runningScript RunningScript for i := 0; i < 3; i++ { key, err := rd.ReadString() if err != nil { return RunningScript{}, false, err } switch key { case "name": runningScript.Name, err = rd.ReadString() case "duration_ms": runningScript.Duration, err = cmd.readDuration(rd) case "command": runningScript.Command, err = cmd.readCommand(rd) default: return RunningScript{}, false, fmt.Errorf("redis: function stats unexpected running_script key %s", key) } if err != nil { return RunningScript{}, false, err } } return runningScript, true, nil } func (cmd *FunctionStatsCmd) readEngines(rd *proto.Reader) ([]Engine, error) { n, err := rd.ReadMapLen() if err != nil { return nil, err } engines := make([]Engine, 0, n) for i := 0; i < n; i++ { engine := Engine{} engine.Language, err = rd.ReadString() if err != nil { return nil, err } err = rd.ReadFixedMapLen(2) if err != nil { return nil, fmt.Errorf("redis: function stats unexpected %s engine map length", engine.Language) } for i := 0; i < 2; i++ { key, err := rd.ReadString() switch key { case "libraries_count": engine.LibrariesCount, err = rd.ReadInt() case "functions_count": engine.FunctionsCount, err = rd.ReadInt() } if err != nil { return nil, err } } engines = append(engines, engine) } return engines, nil } func (cmd *FunctionStatsCmd) readDuration(rd *proto.Reader) (time.Duration, error) { t, err := rd.ReadInt() if err != nil { return time.Duration(0), err } return time.Duration(t) * time.Millisecond, nil } func (cmd *FunctionStatsCmd) readCommand(rd *proto.Reader) ([]string, error) { n, err := rd.ReadArrayLen() if err != nil { return nil, err } command := make([]string, 0, n) for i := 0; i < n; i++ { x, err := rd.ReadString() if err != nil { return nil, err } command = append(command, x) } return command, nil } func (cmd 
*FunctionStatsCmd) readRunningScripts(rd *proto.Reader) ([]RunningScript, bool, error) { n, err := rd.ReadArrayLen() if err != nil { return nil, false, err } runningScripts := make([]RunningScript, 0, n) for i := 0; i < n; i++ { rs, _, err := cmd.readRunningScript(rd) if err != nil { return nil, false, err } runningScripts = append(runningScripts, rs) } return runningScripts, len(runningScripts) > 0, nil } //------------------------------------------------------------------------------ // LCSQuery is a parameter used for the LCS command type LCSQuery struct { Key1 string Key2 string Len bool Idx bool MinMatchLen int WithMatchLen bool } // LCSMatch is the result set of the LCS command. type LCSMatch struct { MatchString string Matches []LCSMatchedPosition Len int64 } type LCSMatchedPosition struct { Key1 LCSPosition Key2 LCSPosition // only for withMatchLen is true MatchLen int64 } type LCSPosition struct { Start int64 End int64 } type LCSCmd struct { baseCmd // 1: match string // 2: match len // 3: match idx LCSMatch readType uint8 val *LCSMatch } func NewLCSCmd(ctx context.Context, q *LCSQuery) *LCSCmd { args := make([]interface{}, 3, 7) args[0] = "lcs" args[1] = q.Key1 args[2] = q.Key2 cmd := &LCSCmd{readType: 1} if q.Len { cmd.readType = 2 args = append(args, "len") } else if q.Idx { cmd.readType = 3 args = append(args, "idx") if q.MinMatchLen != 0 { args = append(args, "minmatchlen", q.MinMatchLen) } if q.WithMatchLen { args = append(args, "withmatchlen") } } cmd.baseCmd = baseCmd{ ctx: ctx, args: args, } return cmd } func (cmd *LCSCmd) SetVal(val *LCSMatch) { cmd.val = val } func (cmd *LCSCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *LCSCmd) Val() *LCSMatch { return cmd.val } func (cmd *LCSCmd) Result() (*LCSMatch, error) { return cmd.val, cmd.err } func (cmd *LCSCmd) readReply(rd *proto.Reader) (err error) { lcs := &LCSMatch{} switch cmd.readType { case 1: // match string if lcs.MatchString, err = rd.ReadString(); err != nil { return err } case 2: // match len if lcs.Len, err = rd.ReadInt(); err != nil { return err } case 3: // read LCSMatch if err = rd.ReadFixedMapLen(2); err != nil { return err } // read matches or len field for i := 0; i < 2; i++ { key, err := rd.ReadString() if err != nil { return err } switch key { case "matches": // read array of matched positions if lcs.Matches, err = cmd.readMatchedPositions(rd); err != nil { return err } case "len": // read match length if lcs.Len, err = rd.ReadInt(); err != nil { return err } } } } cmd.val = lcs return nil } func (cmd *LCSCmd) readMatchedPositions(rd *proto.Reader) ([]LCSMatchedPosition, error) { n, err := rd.ReadArrayLen() if err != nil { return nil, err } positions := make([]LCSMatchedPosition, n) for i := 0; i < n; i++ { pn, err := rd.ReadArrayLen() if err != nil { return nil, err } if positions[i].Key1, err = cmd.readPosition(rd); err != nil { return nil, err } if positions[i].Key2, err = cmd.readPosition(rd); err != nil { return nil, err } // read match length if WithMatchLen is true if pn > 2 { if positions[i].MatchLen, err = rd.ReadInt(); err != nil { return nil, err } } } return positions, nil } func (cmd *LCSCmd) readPosition(rd *proto.Reader) (pos LCSPosition, err error) { if err = rd.ReadFixedArrayLen(2); err != nil { return pos, err } if pos.Start, err = rd.ReadInt(); err != nil { return pos, err } if pos.End, err = rd.ReadInt(); err != nil { return pos, err } return pos, nil } // ------------------------------------------------------------------------ type KeyFlags struct { Key string Flags 
[]string } type KeyFlagsCmd struct { baseCmd val []KeyFlags } var _ Cmder = (*KeyFlagsCmd)(nil) func NewKeyFlagsCmd(ctx context.Context, args ...interface{}) *KeyFlagsCmd { return &KeyFlagsCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *KeyFlagsCmd) SetVal(val []KeyFlags) { cmd.val = val } func (cmd *KeyFlagsCmd) Val() []KeyFlags { return cmd.val } func (cmd *KeyFlagsCmd) Result() ([]KeyFlags, error) { return cmd.val, cmd.err } func (cmd *KeyFlagsCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *KeyFlagsCmd) readReply(rd *proto.Reader) error { n, err := rd.ReadArrayLen() if err != nil { return err } if n == 0 { cmd.val = make([]KeyFlags, 0) return nil } cmd.val = make([]KeyFlags, n) for i := 0; i < len(cmd.val); i++ { if err = rd.ReadFixedArrayLen(2); err != nil { return err } if cmd.val[i].Key, err = rd.ReadString(); err != nil { return err } flagsLen, err := rd.ReadArrayLen() if err != nil { return err } cmd.val[i].Flags = make([]string, flagsLen) for j := 0; j < flagsLen; j++ { if cmd.val[i].Flags[j], err = rd.ReadString(); err != nil { return err } } } return nil } // --------------------------------------------------------------------------------------------------- type ClusterLink struct { Direction string Node string CreateTime int64 Events string SendBufferAllocated int64 SendBufferUsed int64 } type ClusterLinksCmd struct { baseCmd val []ClusterLink } var _ Cmder = (*ClusterLinksCmd)(nil) func NewClusterLinksCmd(ctx context.Context, args ...interface{}) *ClusterLinksCmd { return &ClusterLinksCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *ClusterLinksCmd) SetVal(val []ClusterLink) { cmd.val = val } func (cmd *ClusterLinksCmd) Val() []ClusterLink { return cmd.val } func (cmd *ClusterLinksCmd) Result() ([]ClusterLink, error) { return cmd.val, cmd.err } func (cmd *ClusterLinksCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *ClusterLinksCmd) readReply(rd *proto.Reader) error { n, err := rd.ReadArrayLen() if err != nil { return err } cmd.val = make([]ClusterLink, n) for i := 0; i < len(cmd.val); i++ { m, err := rd.ReadMapLen() if err != nil { return err } for j := 0; j < m; j++ { key, err := rd.ReadString() if err != nil { return err } switch key { case "direction": cmd.val[i].Direction, err = rd.ReadString() case "node": cmd.val[i].Node, err = rd.ReadString() case "create-time": cmd.val[i].CreateTime, err = rd.ReadInt() case "events": cmd.val[i].Events, err = rd.ReadString() case "send-buffer-allocated": cmd.val[i].SendBufferAllocated, err = rd.ReadInt() case "send-buffer-used": cmd.val[i].SendBufferUsed, err = rd.ReadInt() default: return fmt.Errorf("redis: unexpected key %q in CLUSTER LINKS reply", key) } if err != nil { return err } } } return nil } // ------------------------------------------------------------------------------------------------------------------ type SlotRange struct { Start int64 End int64 } type Node struct { ID string Endpoint string IP string Hostname string Port int64 TLSPort int64 Role string ReplicationOffset int64 Health string } type ClusterShard struct { Slots []SlotRange Nodes []Node } type ClusterShardsCmd struct { baseCmd val []ClusterShard } var _ Cmder = (*ClusterShardsCmd)(nil) func NewClusterShardsCmd(ctx context.Context, args ...interface{}) *ClusterShardsCmd { return &ClusterShardsCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *ClusterShardsCmd) SetVal(val []ClusterShard) { cmd.val = val } func (cmd *ClusterShardsCmd) Val() []ClusterShard { return cmd.val } func 
(cmd *ClusterShardsCmd) Result() ([]ClusterShard, error) { return cmd.val, cmd.err } func (cmd *ClusterShardsCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *ClusterShardsCmd) readReply(rd *proto.Reader) error { n, err := rd.ReadArrayLen() if err != nil { return err } cmd.val = make([]ClusterShard, n) for i := 0; i < n; i++ { m, err := rd.ReadMapLen() if err != nil { return err } for j := 0; j < m; j++ { key, err := rd.ReadString() if err != nil { return err } switch key { case "slots": l, err := rd.ReadArrayLen() if err != nil { return err } for k := 0; k < l; k += 2 { start, err := rd.ReadInt() if err != nil { return err } end, err := rd.ReadInt() if err != nil { return err } cmd.val[i].Slots = append(cmd.val[i].Slots, SlotRange{Start: start, End: end}) } case "nodes": nodesLen, err := rd.ReadArrayLen() if err != nil { return err } cmd.val[i].Nodes = make([]Node, nodesLen) for k := 0; k < nodesLen; k++ { nodeMapLen, err := rd.ReadMapLen() if err != nil { return err } for l := 0; l < nodeMapLen; l++ { nodeKey, err := rd.ReadString() if err != nil { return err } switch nodeKey { case "id": cmd.val[i].Nodes[k].ID, err = rd.ReadString() case "endpoint": cmd.val[i].Nodes[k].Endpoint, err = rd.ReadString() case "ip": cmd.val[i].Nodes[k].IP, err = rd.ReadString() case "hostname": cmd.val[i].Nodes[k].Hostname, err = rd.ReadString() case "port": cmd.val[i].Nodes[k].Port, err = rd.ReadInt() case "tls-port": cmd.val[i].Nodes[k].TLSPort, err = rd.ReadInt() case "role": cmd.val[i].Nodes[k].Role, err = rd.ReadString() case "replication-offset": cmd.val[i].Nodes[k].ReplicationOffset, err = rd.ReadInt() case "health": cmd.val[i].Nodes[k].Health, err = rd.ReadString() default: return fmt.Errorf("redis: unexpected key %q in CLUSTER SHARDS node reply", nodeKey) } if err != nil { return err } } } default: return fmt.Errorf("redis: unexpected key %q in CLUSTER SHARDS reply", key) } } } return nil } // ----------------------------------------- type RankScore struct { Rank int64 Score float64 } type RankWithScoreCmd struct { baseCmd val RankScore } var _ Cmder = (*RankWithScoreCmd)(nil) func NewRankWithScoreCmd(ctx context.Context, args ...interface{}) *RankWithScoreCmd { return &RankWithScoreCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *RankWithScoreCmd) SetVal(val RankScore) { cmd.val = val } func (cmd *RankWithScoreCmd) Val() RankScore { return cmd.val } func (cmd *RankWithScoreCmd) Result() (RankScore, error) { return cmd.val, cmd.err } func (cmd *RankWithScoreCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *RankWithScoreCmd) readReply(rd *proto.Reader) error { if err := rd.ReadFixedArrayLen(2); err != nil { return err } rank, err := rd.ReadInt() if err != nil { return err } score, err := rd.ReadFloat() if err != nil { return err } cmd.val = RankScore{Rank: rank, Score: score} return nil } // -------------------------------------------------------------------------------------------------- // ClientFlags is redis-server client flags, copy from redis/src/server.h (redis 7.0) type ClientFlags uint64 const ( ClientSlave ClientFlags = 1 << 0 /* This client is a replica */ ClientMaster ClientFlags = 1 << 1 /* This client is a master */ ClientMonitor ClientFlags = 1 << 2 /* This client is a slave monitor, see MONITOR */ ClientMulti ClientFlags = 1 << 3 /* This client is in a MULTI context */ ClientBlocked ClientFlags = 1 << 4 /* The client is waiting in a blocking operation */ ClientDirtyCAS ClientFlags = 1 << 5 /* Watched keys modified. EXEC will fail. 
*/ ClientCloseAfterReply ClientFlags = 1 << 6 /* Close after writing entire reply. */ ClientUnBlocked ClientFlags = 1 << 7 /* This client was unblocked and is stored in server.unblocked_clients */ ClientScript ClientFlags = 1 << 8 /* This is a non-connected client used by Lua */ ClientAsking ClientFlags = 1 << 9 /* Client issued the ASKING command */ ClientCloseASAP ClientFlags = 1 << 10 /* Close this client ASAP */ ClientUnixSocket ClientFlags = 1 << 11 /* Client connected via Unix domain socket */ ClientDirtyExec ClientFlags = 1 << 12 /* EXEC will fail for errors while queueing */ ClientMasterForceReply ClientFlags = 1 << 13 /* Queue replies even if is master */ ClientForceAOF ClientFlags = 1 << 14 /* Force AOF propagation of current cmd. */ ClientForceRepl ClientFlags = 1 << 15 /* Force replication of current cmd. */ ClientPrePSync ClientFlags = 1 << 16 /* Instance don't understand PSYNC. */ ClientReadOnly ClientFlags = 1 << 17 /* Cluster client is in read-only state. */ ClientPubSub ClientFlags = 1 << 18 /* Client is in Pub/Sub mode. */ ClientPreventAOFProp ClientFlags = 1 << 19 /* Don't propagate to AOF. */ ClientPreventReplProp ClientFlags = 1 << 20 /* Don't propagate to slaves. */ ClientPreventProp ClientFlags = ClientPreventAOFProp | ClientPreventReplProp ClientPendingWrite ClientFlags = 1 << 21 /* Client has output to send but a-write handler is yet not installed. */ ClientReplyOff ClientFlags = 1 << 22 /* Don't send replies to client. */ ClientReplySkipNext ClientFlags = 1 << 23 /* Set ClientREPLY_SKIP for next cmd */ ClientReplySkip ClientFlags = 1 << 24 /* Don't send just this reply. */ ClientLuaDebug ClientFlags = 1 << 25 /* Run EVAL in debug mode. */ ClientLuaDebugSync ClientFlags = 1 << 26 /* EVAL debugging without fork() */ ClientModule ClientFlags = 1 << 27 /* Non connected client used by some module. */ ClientProtected ClientFlags = 1 << 28 /* Client should not be freed for now. */ ClientExecutingCommand ClientFlags = 1 << 29 /* Indicates that the client is currently in the process of handling a command. usually this will be marked only during call() however, blocked clients might have this flag kept until they will try to reprocess the command. */ ClientPendingCommand ClientFlags = 1 << 30 /* Indicates the client has a fully * parsed command ready for execution. */ ClientTracking ClientFlags = 1 << 31 /* Client enabled keys tracking in order to perform client side caching. */ ClientTrackingBrokenRedir ClientFlags = 1 << 32 /* Target client is invalid. */ ClientTrackingBCAST ClientFlags = 1 << 33 /* Tracking in BCAST mode. */ ClientTrackingOptIn ClientFlags = 1 << 34 /* Tracking in opt-in mode. */ ClientTrackingOptOut ClientFlags = 1 << 35 /* Tracking in opt-out mode. */ ClientTrackingCaching ClientFlags = 1 << 36 /* CACHING yes/no was given, depending on optin/optout mode. */ ClientTrackingNoLoop ClientFlags = 1 << 37 /* Don't send invalidation messages about writes performed by myself.*/ ClientInTimeoutTable ClientFlags = 1 << 38 /* This client is in the timeout table. */ ClientProtocolError ClientFlags = 1 << 39 /* Protocol error chatting with it. */ ClientCloseAfterCommand ClientFlags = 1 << 40 /* Close after executing commands * and writing entire reply. */ ClientDenyBlocking ClientFlags = 1 << 41 /* Indicate that the client should not be blocked. currently, turned on inside MULTI, Lua, RM_Call, and AOF client */ ClientReplRDBOnly ClientFlags = 1 << 42 /* This client is a replica that only wants RDB without replication buffer. 
*/ ClientNoEvict ClientFlags = 1 << 43 /* This client is protected against client memory eviction. */ ClientAllowOOM ClientFlags = 1 << 44 /* Client used by RM_Call is allowed to fully execute scripts even when in OOM */ ClientNoTouch ClientFlags = 1 << 45 /* This client will not touch LFU/LRU stats. */ ClientPushing ClientFlags = 1 << 46 /* This client is pushing notifications. */ ) // ClientInfo is redis-server ClientInfo, not go-redis *Client type ClientInfo struct { ID int64 // redis version 2.8.12, a unique 64-bit client ID Addr string // address/port of the client LAddr string // address/port of local address client connected to (bind address) FD int64 // file descriptor corresponding to the socket Name string // the name set by the client with CLIENT SETNAME Age time.Duration // total duration of the connection in seconds Idle time.Duration // idle time of the connection in seconds Flags ClientFlags // client flags (see below) DB int // current database ID Sub int // number of channel subscriptions PSub int // number of pattern matching subscriptions SSub int // redis version 7.0.3, number of shard channel subscriptions Multi int // number of commands in a MULTI/EXEC context Watch int // redis version 7.4 RC1, number of keys this client is currently watching. QueryBuf int // qbuf, query buffer length (0 means no query pending) QueryBufFree int // qbuf-free, free space of the query buffer (0 means the buffer is full) ArgvMem int // incomplete arguments for the next command (already extracted from query buffer) MultiMem int // redis version 7.0, memory is used up by buffered multi commands BufferSize int // rbs, usable size of buffer BufferPeak int // rbp, peak used size of buffer in last 5 sec interval OutputBufferLength int // obl, output buffer length OutputListLength int // oll, output list length (replies are queued in this list when the buffer is full) OutputMemory int // omem, output buffer memory usage TotalMemory int // tot-mem, total memory consumed by this client in its various buffers IoThread int // io-thread id Events string // file descriptor events (see below) LastCmd string // cmd, last command played User string // the authenticated username of the client Redir int64 // client id of current client tracking redirection Resp int // redis version 7.0, client RESP protocol version LibName string // redis version 7.2, client library name LibVer string // redis version 7.2, client library version } type ClientInfoCmd struct { baseCmd val *ClientInfo } var _ Cmder = (*ClientInfoCmd)(nil) func NewClientInfoCmd(ctx context.Context, args ...interface{}) *ClientInfoCmd { return &ClientInfoCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *ClientInfoCmd) SetVal(val *ClientInfo) { cmd.val = val } func (cmd *ClientInfoCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *ClientInfoCmd) Val() *ClientInfo { return cmd.val } func (cmd *ClientInfoCmd) Result() (*ClientInfo, error) { return cmd.val, cmd.err } func (cmd *ClientInfoCmd) readReply(rd *proto.Reader) (err error) { txt, err := rd.ReadString() if err != nil { return err } // sds o = catClientInfoString(sdsempty(), c); // o = sdscatlen(o,"\n",1); // addReplyVerbatim(c,o,sdslen(o),"txt"); // sdsfree(o); cmd.val, err = parseClientInfo(strings.TrimSpace(txt)) return err } // fmt.Sscanf() cannot handle null values func parseClientInfo(txt string) (info *ClientInfo, err error) { info = &ClientInfo{} for _, s := range strings.Split(txt, " ") { kv := strings.Split(s, "=") if len(kv) != 2 { return nil, 
fmt.Errorf("redis: unexpected client info data (%s)", s) } key, val := kv[0], kv[1] switch key { case "id": info.ID, err = strconv.ParseInt(val, 10, 64) case "addr": info.Addr = val case "laddr": info.LAddr = val case "fd": info.FD, err = strconv.ParseInt(val, 10, 64) case "name": info.Name = val case "age": var age int if age, err = strconv.Atoi(val); err == nil { info.Age = time.Duration(age) * time.Second } case "idle": var idle int if idle, err = strconv.Atoi(val); err == nil { info.Idle = time.Duration(idle) * time.Second } case "flags": if val == "N" { break } for i := 0; i < len(val); i++ { switch val[i] { case 'S': info.Flags |= ClientSlave case 'O': info.Flags |= ClientSlave | ClientMonitor case 'M': info.Flags |= ClientMaster case 'P': info.Flags |= ClientPubSub case 'x': info.Flags |= ClientMulti case 'b': info.Flags |= ClientBlocked case 't': info.Flags |= ClientTracking case 'R': info.Flags |= ClientTrackingBrokenRedir case 'B': info.Flags |= ClientTrackingBCAST case 'd': info.Flags |= ClientDirtyCAS case 'c': info.Flags |= ClientCloseAfterCommand case 'u': info.Flags |= ClientUnBlocked case 'A': info.Flags |= ClientCloseASAP case 'U': info.Flags |= ClientUnixSocket case 'r': info.Flags |= ClientReadOnly case 'e': info.Flags |= ClientNoEvict case 'T': info.Flags |= ClientNoTouch default: return nil, fmt.Errorf("redis: unexpected client info flags(%s)", string(val[i])) } } case "db": info.DB, err = strconv.Atoi(val) case "sub": info.Sub, err = strconv.Atoi(val) case "psub": info.PSub, err = strconv.Atoi(val) case "ssub": info.SSub, err = strconv.Atoi(val) case "multi": info.Multi, err = strconv.Atoi(val) case "watch": info.Watch, err = strconv.Atoi(val) case "qbuf": info.QueryBuf, err = strconv.Atoi(val) case "qbuf-free": info.QueryBufFree, err = strconv.Atoi(val) case "argv-mem": info.ArgvMem, err = strconv.Atoi(val) case "multi-mem": info.MultiMem, err = strconv.Atoi(val) case "rbs": info.BufferSize, err = strconv.Atoi(val) case "rbp": info.BufferPeak, err = strconv.Atoi(val) case "obl": info.OutputBufferLength, err = strconv.Atoi(val) case "oll": info.OutputListLength, err = strconv.Atoi(val) case "omem": info.OutputMemory, err = strconv.Atoi(val) case "tot-mem": info.TotalMemory, err = strconv.Atoi(val) case "events": info.Events = val case "cmd": info.LastCmd = val case "user": info.User = val case "redir": info.Redir, err = strconv.ParseInt(val, 10, 64) case "resp": info.Resp, err = strconv.Atoi(val) case "lib-name": info.LibName = val case "lib-ver": info.LibVer = val case "io-thread": info.IoThread, err = strconv.Atoi(val) default: return nil, fmt.Errorf("redis: unexpected client info key(%s)", key) } if err != nil { return nil, err } } return info, nil } // ------------------------------------------- type ACLLogEntry struct { Count int64 Reason string Context string Object string Username string AgeSeconds float64 ClientInfo *ClientInfo EntryID int64 TimestampCreated int64 TimestampLastUpdated int64 } type ACLLogCmd struct { baseCmd val []*ACLLogEntry } var _ Cmder = (*ACLLogCmd)(nil) func NewACLLogCmd(ctx context.Context, args ...interface{}) *ACLLogCmd { return &ACLLogCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *ACLLogCmd) SetVal(val []*ACLLogEntry) { cmd.val = val } func (cmd *ACLLogCmd) Val() []*ACLLogEntry { return cmd.val } func (cmd *ACLLogCmd) Result() ([]*ACLLogEntry, error) { return cmd.val, cmd.err } func (cmd *ACLLogCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *ACLLogCmd) readReply(rd *proto.Reader) error { n, err := 
rd.ReadArrayLen() if err != nil { return err } cmd.val = make([]*ACLLogEntry, n) for i := 0; i < n; i++ { cmd.val[i] = &ACLLogEntry{} entry := cmd.val[i] respLen, err := rd.ReadMapLen() if err != nil { return err } for j := 0; j < respLen; j++ { key, err := rd.ReadString() if err != nil { return err } switch key { case "count": entry.Count, err = rd.ReadInt() case "reason": entry.Reason, err = rd.ReadString() case "context": entry.Context, err = rd.ReadString() case "object": entry.Object, err = rd.ReadString() case "username": entry.Username, err = rd.ReadString() case "age-seconds": entry.AgeSeconds, err = rd.ReadFloat() case "client-info": txt, err := rd.ReadString() if err != nil { return err } entry.ClientInfo, err = parseClientInfo(strings.TrimSpace(txt)) if err != nil { return err } case "entry-id": entry.EntryID, err = rd.ReadInt() case "timestamp-created": entry.TimestampCreated, err = rd.ReadInt() case "timestamp-last-updated": entry.TimestampLastUpdated, err = rd.ReadInt() default: return fmt.Errorf("redis: unexpected key %q in ACL LOG reply", key) } if err != nil { return err } } } return nil } // LibraryInfo holds the library info. type LibraryInfo struct { LibName *string LibVer *string } // WithLibraryName returns a valid LibraryInfo with library name only. func WithLibraryName(libName string) LibraryInfo { return LibraryInfo{LibName: &libName} } // WithLibraryVersion returns a valid LibraryInfo with library version only. func WithLibraryVersion(libVer string) LibraryInfo { return LibraryInfo{LibVer: &libVer} } // ------------------------------------------- type InfoCmd struct { baseCmd val map[string]map[string]string } var _ Cmder = (*InfoCmd)(nil) func NewInfoCmd(ctx context.Context, args ...interface{}) *InfoCmd { return &InfoCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *InfoCmd) SetVal(val map[string]map[string]string) { cmd.val = val } func (cmd *InfoCmd) Val() map[string]map[string]string { return cmd.val } func (cmd *InfoCmd) Result() (map[string]map[string]string, error) { return cmd.val, cmd.err } func (cmd *InfoCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *InfoCmd) readReply(rd *proto.Reader) error { val, err := rd.ReadString() if err != nil { return err } section := "" scanner := bufio.NewScanner(strings.NewReader(val)) for scanner.Scan() { line := scanner.Text() if strings.HasPrefix(line, "#") { if cmd.val == nil { cmd.val = make(map[string]map[string]string) } section = strings.TrimPrefix(line, "# ") cmd.val[section] = make(map[string]string) } else if line != "" { if section == "Modules" { moduleRe := regexp.MustCompile(`module:name=(.+?),(.+)$`) kv := moduleRe.FindStringSubmatch(line) if len(kv) == 3 { cmd.val[section][kv[1]] = kv[2] } } else { kv := strings.SplitN(line, ":", 2) if len(kv) == 2 { cmd.val[section][kv[0]] = kv[1] } } } } return nil } func (cmd *InfoCmd) Item(section, key string) string { if cmd.val == nil { return "" } else if cmd.val[section] == nil { return "" } else { return cmd.val[section][key] } } type MonitorStatus int const ( monitorStatusIdle MonitorStatus = iota monitorStatusStart monitorStatusStop ) type MonitorCmd struct { baseCmd ch chan string status MonitorStatus mu sync.Mutex } func newMonitorCmd(ctx context.Context, ch chan string) *MonitorCmd { return &MonitorCmd{ baseCmd: baseCmd{ ctx: ctx, args: []interface{}{"monitor"}, }, ch: ch, status: monitorStatusIdle, mu: sync.Mutex{}, } } func (cmd *MonitorCmd) String() string { return cmdString(cmd, nil) } func (cmd *MonitorCmd) readReply(rd 
*proto.Reader) error { ctx, cancel := context.WithCancel(cmd.ctx) go func(ctx context.Context) { for { select { case <-ctx.Done(): return default: err := cmd.readMonitor(rd, cancel) if err != nil { cmd.err = err return } } } }(ctx) return nil } func (cmd *MonitorCmd) readMonitor(rd *proto.Reader, cancel context.CancelFunc) error { for { cmd.mu.Lock() st := cmd.status pk, _ := rd.Peek(1) cmd.mu.Unlock() if len(pk) != 0 && st == monitorStatusStart { cmd.mu.Lock() line, err := rd.ReadString() cmd.mu.Unlock() if err != nil { return err } cmd.ch <- line } if st == monitorStatusStop { cancel() break } } return nil } func (cmd *MonitorCmd) Start() { cmd.mu.Lock() defer cmd.mu.Unlock() cmd.status = monitorStatusStart } func (cmd *MonitorCmd) Stop() { cmd.mu.Lock() defer cmd.mu.Unlock() cmd.status = monitorStatusStop } type VectorScoreSliceCmd struct { baseCmd val []VectorScore } var _ Cmder = (*VectorScoreSliceCmd)(nil) func NewVectorInfoSliceCmd(ctx context.Context, args ...any) *VectorScoreSliceCmd { return &VectorScoreSliceCmd{ baseCmd: baseCmd{ ctx: ctx, args: args, }, } } func (cmd *VectorScoreSliceCmd) SetVal(val []VectorScore) { cmd.val = val } func (cmd *VectorScoreSliceCmd) Val() []VectorScore { return cmd.val } func (cmd *VectorScoreSliceCmd) Result() ([]VectorScore, error) { return cmd.val, cmd.err } func (cmd *VectorScoreSliceCmd) String() string { return cmdString(cmd, cmd.val) } func (cmd *VectorScoreSliceCmd) readReply(rd *proto.Reader) error { n, err := rd.ReadMapLen() if err != nil { return err } cmd.val = make([]VectorScore, n) for i := 0; i < n; i++ { name, err := rd.ReadString() if err != nil { return err } cmd.val[i].Name = name score, err := rd.ReadFloat() if err != nil { return err } cmd.val[i].Score = score } return nil } dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.10.0/redis.go0000644000000000000000000006172415024302467023364 0ustar rootrootpackage redis import ( "context" "errors" "fmt" "net" "sync" "sync/atomic" "time" "github.com/redis/go-redis/v9/auth" "github.com/redis/go-redis/v9/internal" "github.com/redis/go-redis/v9/internal/hscan" "github.com/redis/go-redis/v9/internal/pool" "github.com/redis/go-redis/v9/internal/proto" ) // Scanner internal/hscan.Scanner exposed interface. type Scanner = hscan.Scanner // Nil reply returned by Redis when key does not exist. 
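// Callers typically compare errors against this sentinel to distinguish a
// missing key from a real failure. A minimal sketch (not part of the library
// source; assumes a *redis.Client named rdb):
//
//	val, err := rdb.Get(ctx, "key").Result()
//	switch {
//	case err == redis.Nil:
//		// the key does not exist
//	case err != nil:
//		// a network or server error occurred
//	default:
//		_ = val // the key exists and val holds its value
//	}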
const Nil = proto.Nil // SetLogger set custom log func SetLogger(logger internal.Logging) { internal.Logger = logger } //------------------------------------------------------------------------------ type Hook interface { DialHook(next DialHook) DialHook ProcessHook(next ProcessHook) ProcessHook ProcessPipelineHook(next ProcessPipelineHook) ProcessPipelineHook } type ( DialHook func(ctx context.Context, network, addr string) (net.Conn, error) ProcessHook func(ctx context.Context, cmd Cmder) error ProcessPipelineHook func(ctx context.Context, cmds []Cmder) error ) type hooksMixin struct { hooksMu *sync.RWMutex slice []Hook initial hooks current hooks } func (hs *hooksMixin) initHooks(hooks hooks) { hs.hooksMu = new(sync.RWMutex) hs.initial = hooks hs.chain() } type hooks struct { dial DialHook process ProcessHook pipeline ProcessPipelineHook txPipeline ProcessPipelineHook } func (h *hooks) setDefaults() { if h.dial == nil { h.dial = func(ctx context.Context, network, addr string) (net.Conn, error) { return nil, nil } } if h.process == nil { h.process = func(ctx context.Context, cmd Cmder) error { return nil } } if h.pipeline == nil { h.pipeline = func(ctx context.Context, cmds []Cmder) error { return nil } } if h.txPipeline == nil { h.txPipeline = func(ctx context.Context, cmds []Cmder) error { return nil } } } // AddHook is to add a hook to the queue. // Hook is a function executed during network connection, command execution, and pipeline, // it is a first-in-first-out stack queue (FIFO). // You need to execute the next hook in each hook, unless you want to terminate the execution of the command. // For example, you added hook-1, hook-2: // // client.AddHook(hook-1, hook-2) // // hook-1: // // func (Hook1) ProcessHook(next redis.ProcessHook) redis.ProcessHook { // return func(ctx context.Context, cmd Cmder) error { // print("hook-1 start") // next(ctx, cmd) // print("hook-1 end") // return nil // } // } // // hook-2: // // func (Hook2) ProcessHook(next redis.ProcessHook) redis.ProcessHook { // return func(ctx context.Context, cmd redis.Cmder) error { // print("hook-2 start") // next(ctx, cmd) // print("hook-2 end") // return nil // } // } // // The execution sequence is: // // hook-1 start -> hook-2 start -> exec redis cmd -> hook-2 end -> hook-1 end // // Please note: "next(ctx, cmd)" is very important, it will call the next hook, // if "next(ctx, cmd)" is not executed, the redis command will not be executed. 
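//
// A hook must implement all three methods of the Hook interface; a hook that
// only instruments one phase can return the next hook unchanged for the other
// phases. A minimal sketch (not part of the library source, reusing the
// hypothetical Hook1 from above):
//
//	func (Hook1) DialHook(next redis.DialHook) redis.DialHook { return next }
//
//	func (Hook1) ProcessPipelineHook(next redis.ProcessPipelineHook) redis.ProcessPipelineHook {
//		return next
//	}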
func (hs *hooksMixin) AddHook(hook Hook) { hs.slice = append(hs.slice, hook) hs.chain() } func (hs *hooksMixin) chain() { hs.initial.setDefaults() hs.hooksMu.Lock() defer hs.hooksMu.Unlock() hs.current.dial = hs.initial.dial hs.current.process = hs.initial.process hs.current.pipeline = hs.initial.pipeline hs.current.txPipeline = hs.initial.txPipeline for i := len(hs.slice) - 1; i >= 0; i-- { if wrapped := hs.slice[i].DialHook(hs.current.dial); wrapped != nil { hs.current.dial = wrapped } if wrapped := hs.slice[i].ProcessHook(hs.current.process); wrapped != nil { hs.current.process = wrapped } if wrapped := hs.slice[i].ProcessPipelineHook(hs.current.pipeline); wrapped != nil { hs.current.pipeline = wrapped } if wrapped := hs.slice[i].ProcessPipelineHook(hs.current.txPipeline); wrapped != nil { hs.current.txPipeline = wrapped } } } func (hs *hooksMixin) clone() hooksMixin { hs.hooksMu.Lock() defer hs.hooksMu.Unlock() clone := *hs l := len(clone.slice) clone.slice = clone.slice[:l:l] clone.hooksMu = new(sync.RWMutex) return clone } func (hs *hooksMixin) withProcessHook(ctx context.Context, cmd Cmder, hook ProcessHook) error { for i := len(hs.slice) - 1; i >= 0; i-- { if wrapped := hs.slice[i].ProcessHook(hook); wrapped != nil { hook = wrapped } } return hook(ctx, cmd) } func (hs *hooksMixin) withProcessPipelineHook( ctx context.Context, cmds []Cmder, hook ProcessPipelineHook, ) error { for i := len(hs.slice) - 1; i >= 0; i-- { if wrapped := hs.slice[i].ProcessPipelineHook(hook); wrapped != nil { hook = wrapped } } return hook(ctx, cmds) } func (hs *hooksMixin) dialHook(ctx context.Context, network, addr string) (net.Conn, error) { // Access to hs.current is guarded by a read-only lock since it may be mutated by AddHook(...) // while this dialer is concurrently accessed by the background connection pool population // routine when MinIdleConns > 0. 
hs.hooksMu.RLock() current := hs.current hs.hooksMu.RUnlock() return current.dial(ctx, network, addr) } func (hs *hooksMixin) processHook(ctx context.Context, cmd Cmder) error { return hs.current.process(ctx, cmd) } func (hs *hooksMixin) processPipelineHook(ctx context.Context, cmds []Cmder) error { return hs.current.pipeline(ctx, cmds) } func (hs *hooksMixin) processTxPipelineHook(ctx context.Context, cmds []Cmder) error { return hs.current.txPipeline(ctx, cmds) } //------------------------------------------------------------------------------ type baseClient struct { opt *Options connPool pool.Pooler hooksMixin onClose func() error // hook called when client is closed } func (c *baseClient) clone() *baseClient { clone := *c return &clone } func (c *baseClient) withTimeout(timeout time.Duration) *baseClient { opt := c.opt.clone() opt.ReadTimeout = timeout opt.WriteTimeout = timeout clone := c.clone() clone.opt = opt return clone } func (c *baseClient) String() string { return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB) } func (c *baseClient) newConn(ctx context.Context) (*pool.Conn, error) { cn, err := c.connPool.NewConn(ctx) if err != nil { return nil, err } err = c.initConn(ctx, cn) if err != nil { _ = c.connPool.CloseConn(cn) return nil, err } return cn, nil } func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) { if c.opt.Limiter != nil { err := c.opt.Limiter.Allow() if err != nil { return nil, err } } cn, err := c._getConn(ctx) if err != nil { if c.opt.Limiter != nil { c.opt.Limiter.ReportResult(err) } return nil, err } return cn, nil } func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) { cn, err := c.connPool.Get(ctx) if err != nil { return nil, err } if cn.Inited { return cn, nil } if err := c.initConn(ctx, cn); err != nil { c.connPool.Remove(ctx, cn, err) if err := errors.Unwrap(err); err != nil { return nil, err } return nil, err } return cn, nil } func (c *baseClient) newReAuthCredentialsListener(poolCn *pool.Conn) auth.CredentialsListener { return auth.NewReAuthCredentialsListener( c.reAuthConnection(poolCn), c.onAuthenticationErr(poolCn), ) } func (c *baseClient) reAuthConnection(poolCn *pool.Conn) func(credentials auth.Credentials) error { return func(credentials auth.Credentials) error { var err error username, password := credentials.BasicAuth() ctx := context.Background() connPool := pool.NewSingleConnPool(c.connPool, poolCn) // hooksMixin are intentionally empty here cn := newConn(c.opt, connPool, nil) if username != "" { err = cn.AuthACL(ctx, username, password).Err() } else { err = cn.Auth(ctx, password).Err() } return err } } func (c *baseClient) onAuthenticationErr(poolCn *pool.Conn) func(err error) { return func(err error) { if err != nil { if isBadConn(err, false, c.opt.Addr) { // Close the connection to force a reconnection. err := c.connPool.CloseConn(poolCn) if err != nil { internal.Logger.Printf(context.Background(), "redis: failed to close connection: %v", err) // try to close the network connection directly // so that no resource is leaked err := poolCn.Close() if err != nil { internal.Logger.Printf(context.Background(), "redis: failed to close network connection: %v", err) } } } internal.Logger.Printf(context.Background(), "redis: re-authentication failed: %v", err) } } } func (c *baseClient) wrappedOnClose(newOnClose func() error) func() error { onClose := c.onClose return func() error { var firstErr error err := newOnClose() // Even if we have an error we would like to execute the onClose hook // if it exists. 
We will return the first error that occurred. // This is to keep error handling consistent with the rest of the code. if err != nil { firstErr = err } if onClose != nil { err = onClose() if err != nil && firstErr == nil { firstErr = err } } return firstErr } } func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error { if cn.Inited { return nil } var err error cn.Inited = true connPool := pool.NewSingleConnPool(c.connPool, cn) conn := newConn(c.opt, connPool, &c.hooksMixin) username, password := "", "" if c.opt.StreamingCredentialsProvider != nil { credentials, unsubscribeFromCredentialsProvider, err := c.opt.StreamingCredentialsProvider. Subscribe(c.newReAuthCredentialsListener(cn)) if err != nil { return fmt.Errorf("failed to subscribe to streaming credentials: %w", err) } c.onClose = c.wrappedOnClose(unsubscribeFromCredentialsProvider) cn.SetOnClose(unsubscribeFromCredentialsProvider) username, password = credentials.BasicAuth() } else if c.opt.CredentialsProviderContext != nil { username, password, err = c.opt.CredentialsProviderContext(ctx) if err != nil { return fmt.Errorf("failed to get credentials from context provider: %w", err) } } else if c.opt.CredentialsProvider != nil { username, password = c.opt.CredentialsProvider() } else if c.opt.Username != "" || c.opt.Password != "" { username, password = c.opt.Username, c.opt.Password } // for redis-server versions that do not support the HELLO command, // RESP2 will continue to be used. if err = conn.Hello(ctx, c.opt.Protocol, username, password, c.opt.ClientName).Err(); err == nil { // Authentication successful with HELLO command } else if !isRedisError(err) { // When the server responds with the RESP protocol and the result is not a normal // execution result of the HELLO command, we consider it to be an indication that // the server does not support the HELLO command. // The server may be a redis-server that does not support the HELLO command, // or it could be DragonflyDB or a third-party redis-proxy. They all respond // with different error string results for unsupported commands, making it // difficult to rely on error strings to determine all results. return err } else if password != "" { // Try legacy AUTH command if HELLO failed if username != "" { err = conn.AuthACL(ctx, username, password).Err() } else { err = conn.Auth(ctx, password).Err() } if err != nil { return fmt.Errorf("failed to authenticate: %w", err) } } _, err = conn.Pipelined(ctx, func(pipe Pipeliner) error { if c.opt.DB > 0 { pipe.Select(ctx, c.opt.DB) } if c.opt.readOnly { pipe.ReadOnly(ctx) } if c.opt.ClientName != "" { pipe.ClientSetName(ctx, c.opt.ClientName) } return nil }) if err != nil { return fmt.Errorf("failed to initialize connection options: %w", err) } if !c.opt.DisableIdentity && !c.opt.DisableIndentity { libName := "" libVer := Version() if c.opt.IdentitySuffix != "" { libName = c.opt.IdentitySuffix } p := conn.Pipeline() p.ClientSetInfo(ctx, WithLibraryName(libName)) p.ClientSetInfo(ctx, WithLibraryVersion(libVer)) // Handle network errors (e.g. timeouts) in CLIENT SETINFO to avoid // out of order responses later on. 
if _, err = p.Exec(ctx); err != nil && !isRedisError(err) { return err } } if c.opt.OnConnect != nil { return c.opt.OnConnect(ctx, conn) } return nil } func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error) { if c.opt.Limiter != nil { c.opt.Limiter.ReportResult(err) } if isBadConn(err, false, c.opt.Addr) { c.connPool.Remove(ctx, cn, err) } else { c.connPool.Put(ctx, cn) } } func (c *baseClient) withConn( ctx context.Context, fn func(context.Context, *pool.Conn) error, ) error { cn, err := c.getConn(ctx) if err != nil { return err } var fnErr error defer func() { c.releaseConn(ctx, cn, fnErr) }() fnErr = fn(ctx, cn) return fnErr } func (c *baseClient) dial(ctx context.Context, network, addr string) (net.Conn, error) { return c.opt.Dialer(ctx, network, addr) } func (c *baseClient) process(ctx context.Context, cmd Cmder) error { var lastErr error for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ { attempt := attempt retry, err := c._process(ctx, cmd, attempt) if err == nil || !retry { return err } lastErr = err } return lastErr } func (c *baseClient) assertUnstableCommand(cmd Cmder) bool { switch cmd.(type) { case *AggregateCmd, *FTInfoCmd, *FTSpellCheckCmd, *FTSearchCmd, *FTSynDumpCmd: if c.opt.UnstableResp3 { return true } else { panic("RESP3 responses for this command are disabled because they may still change. Please set the flag UnstableResp3 . See the [README](https://github.com/redis/go-redis/blob/master/README.md) and the release notes for guidance.") } default: return false } } func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool, error) { if attempt > 0 { if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { return false, err } } retryTimeout := uint32(0) if err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error { if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error { return writeCmd(wr, cmd) }); err != nil { atomic.StoreUint32(&retryTimeout, 1) return err } readReplyFunc := cmd.readReply // Apply unstable RESP3 search module. if c.opt.Protocol != 2 && c.assertUnstableCommand(cmd) { readReplyFunc = cmd.readRawReply } if err := cn.WithReader(c.context(ctx), c.cmdTimeout(cmd), readReplyFunc); err != nil { if cmd.readTimeout() == nil { atomic.StoreUint32(&retryTimeout, 1) } else { atomic.StoreUint32(&retryTimeout, 0) } return err } return nil }); err != nil { retry := shouldRetry(err, atomic.LoadUint32(&retryTimeout) == 1) return retry, err } return false, nil } func (c *baseClient) retryBackoff(attempt int) time.Duration { return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff) } func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration { if timeout := cmd.readTimeout(); timeout != nil { t := *timeout if t == 0 { return 0 } return t + 10*time.Second } return c.opt.ReadTimeout } // context returns the context for the current connection. // If the context timeout is enabled, it returns the original context. // Otherwise, it returns a new background context. func (c *baseClient) context(ctx context.Context) context.Context { if c.opt.ContextTimeoutEnabled { return ctx } return context.Background() } // Close closes the client, releasing any open resources. // // It is rare to Close a Client, as the Client is meant to be // long-lived and shared between many goroutines. 
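//
// A common pattern (an illustrative sketch using the exported Client API, not
// part of the library source) is to create a single client at startup and
// close it on shutdown:
//
//	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
//	defer rdb.Close()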
func (c *baseClient) Close() error { var firstErr error if c.onClose != nil { if err := c.onClose(); err != nil { firstErr = err } } if err := c.connPool.Close(); err != nil && firstErr == nil { firstErr = err } return firstErr } func (c *baseClient) getAddr() string { return c.opt.Addr } func (c *baseClient) processPipeline(ctx context.Context, cmds []Cmder) error { if err := c.generalProcessPipeline(ctx, cmds, c.pipelineProcessCmds); err != nil { return err } return cmdsFirstErr(cmds) } func (c *baseClient) processTxPipeline(ctx context.Context, cmds []Cmder) error { if err := c.generalProcessPipeline(ctx, cmds, c.txPipelineProcessCmds); err != nil { return err } return cmdsFirstErr(cmds) } type pipelineProcessor func(context.Context, *pool.Conn, []Cmder) (bool, error) func (c *baseClient) generalProcessPipeline( ctx context.Context, cmds []Cmder, p pipelineProcessor, ) error { var lastErr error for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ { if attempt > 0 { if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { setCmdsErr(cmds, err) return err } } // Enable retries by default to retry dial errors returned by withConn. canRetry := true lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error { var err error canRetry, err = p(ctx, cn, cmds) return err }) if lastErr == nil || !canRetry || !shouldRetry(lastErr, true) { return lastErr } } return lastErr } func (c *baseClient) pipelineProcessCmds( ctx context.Context, cn *pool.Conn, cmds []Cmder, ) (bool, error) { if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error { return writeCmds(wr, cmds) }); err != nil { setCmdsErr(cmds, err) return true, err } if err := cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error { return pipelineReadCmds(rd, cmds) }); err != nil { return true, err } return false, nil } func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error { for i, cmd := range cmds { err := cmd.readReply(rd) cmd.SetErr(err) if err != nil && !isRedisError(err) { setCmdsErr(cmds[i+1:], err) return err } } // Retry errors like "LOADING redis is loading the dataset in memory". return cmds[0].Err() } func (c *baseClient) txPipelineProcessCmds( ctx context.Context, cn *pool.Conn, cmds []Cmder, ) (bool, error) { if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error { return writeCmds(wr, cmds) }); err != nil { setCmdsErr(cmds, err) return true, err } if err := cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error { statusCmd := cmds[0].(*StatusCmd) // Trim multi and exec. trimmedCmds := cmds[1 : len(cmds)-1] if err := txPipelineReadQueued(rd, statusCmd, trimmedCmds); err != nil { setCmdsErr(cmds, err) return err } return pipelineReadCmds(rd, trimmedCmds) }); err != nil { return false, err } return false, nil } func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error { // Parse +OK. if err := statusCmd.readReply(rd); err != nil { return err } // Parse +QUEUED. for range cmds { if err := statusCmd.readReply(rd); err != nil && !isRedisError(err) { return err } } // Parse number of replies. line, err := rd.ReadLine() if err != nil { if err == Nil { err = TxFailedErr } return err } if line[0] != proto.RespArray { return fmt.Errorf("redis: expected '*', but got line %q", line) } return nil } //------------------------------------------------------------------------------ // Client is a Redis client representing a pool of zero or more underlying connections. 
// It's safe for concurrent use by multiple goroutines. // // Client creates and frees connections automatically; it also maintains a free pool // of idle connections. You can control the pool size with Config.PoolSize option. type Client struct { *baseClient cmdable } // NewClient returns a client to the Redis Server specified by Options. func NewClient(opt *Options) *Client { if opt == nil { panic("redis: NewClient nil options") } opt.init() c := Client{ baseClient: &baseClient{ opt: opt, }, } c.init() c.connPool = newConnPool(opt, c.dialHook) return &c } func (c *Client) init() { c.cmdable = c.Process c.initHooks(hooks{ dial: c.baseClient.dial, process: c.baseClient.process, pipeline: c.baseClient.processPipeline, txPipeline: c.baseClient.processTxPipeline, }) } func (c *Client) WithTimeout(timeout time.Duration) *Client { clone := *c clone.baseClient = c.baseClient.withTimeout(timeout) clone.init() return &clone } func (c *Client) Conn() *Conn { return newConn(c.opt, pool.NewStickyConnPool(c.connPool), &c.hooksMixin) } // Do create a Cmd from the args and processes the cmd. func (c *Client) Do(ctx context.Context, args ...interface{}) *Cmd { cmd := NewCmd(ctx, args...) _ = c.Process(ctx, cmd) return cmd } func (c *Client) Process(ctx context.Context, cmd Cmder) error { err := c.processHook(ctx, cmd) cmd.SetErr(err) return err } // Options returns read-only Options that were used to create the client. func (c *Client) Options() *Options { return c.opt } type PoolStats pool.Stats // PoolStats returns connection pool stats. func (c *Client) PoolStats() *PoolStats { stats := c.connPool.Stats() return (*PoolStats)(stats) } func (c *Client) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { return c.Pipeline().Pipelined(ctx, fn) } func (c *Client) Pipeline() Pipeliner { pipe := Pipeline{ exec: pipelineExecer(c.processPipelineHook), } pipe.init() return &pipe } func (c *Client) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { return c.TxPipeline().Pipelined(ctx, fn) } // TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. func (c *Client) TxPipeline() Pipeliner { pipe := Pipeline{ exec: func(ctx context.Context, cmds []Cmder) error { cmds = wrapMultiExec(ctx, cmds) return c.processTxPipelineHook(ctx, cmds) }, } pipe.init() return &pipe } func (c *Client) pubSub() *PubSub { pubsub := &PubSub{ opt: c.opt, newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) { return c.newConn(ctx) }, closeConn: c.connPool.CloseConn, } pubsub.init() return pubsub } // Subscribe subscribes the client to the specified channels. // Channels can be omitted to create empty subscription. // Note that this method does not wait on a response from Redis, so the // subscription may not be active immediately. To force the connection to wait, // you may call the Receive() method on the returned *PubSub like so: // // sub := client.Subscribe(queryResp) // iface, err := sub.Receive() // if err != nil { // // handle error // } // // // Should be *Subscription, but others are possible if other actions have been // // taken on sub since it was created. // switch iface.(type) { // case *Subscription: // // subscribe succeeded // case *Message: // // received first message // case *Pong: // // pong received // default: // // handle error // } // // ch := sub.Channel() func (c *Client) Subscribe(ctx context.Context, channels ...string) *PubSub { pubsub := c.pubSub() if len(channels) > 0 { _ = pubsub.Subscribe(ctx, channels...) 
} return pubsub } // PSubscribe subscribes the client to the given patterns. // Patterns can be omitted to create empty subscription. func (c *Client) PSubscribe(ctx context.Context, channels ...string) *PubSub { pubsub := c.pubSub() if len(channels) > 0 { _ = pubsub.PSubscribe(ctx, channels...) } return pubsub } // SSubscribe Subscribes the client to the specified shard channels. // Channels can be omitted to create empty subscription. func (c *Client) SSubscribe(ctx context.Context, channels ...string) *PubSub { pubsub := c.pubSub() if len(channels) > 0 { _ = pubsub.SSubscribe(ctx, channels...) } return pubsub } //------------------------------------------------------------------------------ // Conn represents a single Redis connection rather than a pool of connections. // Prefer running commands from Client unless there is a specific need // for a continuous single Redis connection. type Conn struct { baseClient cmdable statefulCmdable } // newConn is a helper func to create a new Conn instance. // the Conn instance is not thread-safe and should not be shared between goroutines. // the parentHooks will be cloned, no need to clone before passing it. func newConn(opt *Options, connPool pool.Pooler, parentHooks *hooksMixin) *Conn { c := Conn{ baseClient: baseClient{ opt: opt, connPool: connPool, }, } if parentHooks != nil { c.hooksMixin = parentHooks.clone() } c.cmdable = c.Process c.statefulCmdable = c.Process c.initHooks(hooks{ dial: c.baseClient.dial, process: c.baseClient.process, pipeline: c.baseClient.processPipeline, txPipeline: c.baseClient.processTxPipeline, }) return &c } func (c *Conn) Process(ctx context.Context, cmd Cmder) error { err := c.processHook(ctx, cmd) cmd.SetErr(err) return err } func (c *Conn) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { return c.Pipeline().Pipelined(ctx, fn) } func (c *Conn) Pipeline() Pipeliner { pipe := Pipeline{ exec: c.processPipelineHook, } pipe.init() return &pipe } func (c *Conn) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { return c.TxPipeline().Pipelined(ctx, fn) } // TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. func (c *Conn) TxPipeline() Pipeliner { pipe := Pipeline{ exec: func(ctx context.Context, cmds []Cmder) error { cmds = wrapMultiExec(ctx, cmds) return c.processTxPipelineHook(ctx, cmds) }, } pipe.init() return &pipe } dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.10.0/options.go0000644000000000000000000004067515024302467023753 0ustar rootrootpackage redis import ( "context" "crypto/tls" "errors" "fmt" "net" "net/url" "runtime" "sort" "strconv" "strings" "time" "github.com/redis/go-redis/v9/auth" "github.com/redis/go-redis/v9/internal/pool" ) // Limiter is the interface of a rate limiter or a circuit breaker. type Limiter interface { // Allow returns nil if operation is allowed or an error otherwise. // If operation is allowed client must ReportResult of the operation // whether it is a success or a failure. Allow() error // ReportResult reports the result of the previously allowed operation. // nil indicates a success, non-nil error usually indicates a failure. ReportResult(result error) } // Options keeps the settings to set up redis connection. type Options struct { // Network type, either tcp or unix. // // default: is tcp. Network string // Addr is the address formated as host:port Addr string // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn. 
ClientName string // Dialer creates new network connection and has priority over // Network and Addr options. Dialer func(ctx context.Context, network, addr string) (net.Conn, error) // Hook that is called when new connection is established. OnConnect func(ctx context.Context, cn *Conn) error // Protocol 2 or 3. Use the version to negotiate RESP version with redis-server. // // default: 3. Protocol int // Username is used to authenticate the current connection // with one of the connections defined in the ACL list when connecting // to a Redis 6.0 instance, or greater, that is using the Redis ACL system. Username string // Password is an optional password. Must match the password specified in the // `requirepass` server configuration option (if connecting to a Redis 5.0 instance, or lower), // or the User Password when connecting to a Redis 6.0 instance, or greater, // that is using the Redis ACL system. Password string // CredentialsProvider allows the username and password to be updated // before reconnecting. It should return the current username and password. CredentialsProvider func() (username string, password string) // CredentialsProviderContext is an enhanced parameter of CredentialsProvider, // done to maintain API compatibility. In the future, // there might be a merge between CredentialsProviderContext and CredentialsProvider. // There will be a conflict between them; if CredentialsProviderContext exists, we will ignore CredentialsProvider. CredentialsProviderContext func(ctx context.Context) (username string, password string, err error) // StreamingCredentialsProvider is used to retrieve the credentials // for the connection from an external source. Those credentials may change // during the connection lifetime. This is useful for managed identity // scenarios where the credentials are retrieved from an external source. // // Currently, this is a placeholder for the future implementation. StreamingCredentialsProvider auth.StreamingCredentialsProvider // DB is the database to be selected after connecting to the server. DB int // MaxRetries is the maximum number of retries before giving up. // -1 (not 0) disables retries. // // default: 3 retries MaxRetries int // MinRetryBackoff is the minimum backoff between each retry. // -1 disables backoff. // // default: 8 milliseconds MinRetryBackoff time.Duration // MaxRetryBackoff is the maximum backoff between each retry. // -1 disables backoff. // default: 512 milliseconds; MaxRetryBackoff time.Duration // DialTimeout for establishing new connections. // // default: 5 seconds DialTimeout time.Duration // ReadTimeout for socket reads. If reached, commands will fail // with a timeout instead of blocking. Supported values: // // - `-1` - no timeout (block indefinitely). // - `-2` - disables SetReadDeadline calls completely. // // default: 3 seconds ReadTimeout time.Duration // WriteTimeout for socket writes. If reached, commands will fail // with a timeout instead of blocking. Supported values: // // - `-1` - no timeout (block indefinitely). // - `-2` - disables SetWriteDeadline calls completely. // // default: 3 seconds WriteTimeout time.Duration // ContextTimeoutEnabled controls whether the client respects context timeouts and deadlines. // See https://redis.uptrace.dev/guide/go-redis-debugging.html#timeouts ContextTimeoutEnabled bool // PoolFIFO type of connection pool. // // - true for FIFO pool // - false for LIFO pool. 
// // Note that FIFO has slightly higher overhead compared to LIFO, // but it helps closing idle connections faster reducing the pool size. PoolFIFO bool // PoolSize is the base number of socket connections. // Default is 10 connections per every available CPU as reported by runtime.GOMAXPROCS. // If there is not enough connections in the pool, new connections will be allocated in excess of PoolSize, // you can limit it through MaxActiveConns // // default: 10 * runtime.GOMAXPROCS(0) PoolSize int // PoolTimeout is the amount of time client waits for connection if all connections // are busy before returning an error. // // default: ReadTimeout + 1 second PoolTimeout time.Duration // MinIdleConns is the minimum number of idle connections which is useful when establishing // new connection is slow. The idle connections are not closed by default. // // default: 0 MinIdleConns int // MaxIdleConns is the maximum number of idle connections. // The idle connections are not closed by default. // // default: 0 MaxIdleConns int // MaxActiveConns is the maximum number of connections allocated by the pool at a given time. // When zero, there is no limit on the number of connections in the pool. // If the pool is full, the next call to Get() will block until a connection is released. MaxActiveConns int // ConnMaxIdleTime is the maximum amount of time a connection may be idle. // Should be less than server's timeout. // // Expired connections may be closed lazily before reuse. // If d <= 0, connections are not closed due to a connection's idle time. // -1 disables idle timeout check. // // default: 30 minutes ConnMaxIdleTime time.Duration // ConnMaxLifetime is the maximum amount of time a connection may be reused. // // Expired connections may be closed lazily before reuse. // If <= 0, connections are not closed due to a connection's age. // // default: 0 ConnMaxLifetime time.Duration // TLSConfig to use. When set, TLS will be negotiated. TLSConfig *tls.Config // Limiter interface used to implement circuit breaker or rate limiter. Limiter Limiter // readOnly enables read only queries on slave/follower nodes. readOnly bool // DisableIndentity - Disable set-lib on connect. // // default: false // // Deprecated: Use DisableIdentity instead. DisableIndentity bool // DisableIdentity is used to disable CLIENT SETINFO command on connect. // // default: false DisableIdentity bool // Add suffix to client name. Default is empty. // IdentitySuffix - add suffix to client name. IdentitySuffix string // UnstableResp3 enables Unstable mode for Redis Search module with RESP3. 
// When unstable mode is enabled, the client will use RESP3 protocol and only be able to use RawResult UnstableResp3 bool } func (opt *Options) init() { if opt.Addr == "" { opt.Addr = "localhost:6379" } if opt.Network == "" { if strings.HasPrefix(opt.Addr, "/") { opt.Network = "unix" } else { opt.Network = "tcp" } } if opt.Protocol < 2 { opt.Protocol = 3 } if opt.DialTimeout == 0 { opt.DialTimeout = 5 * time.Second } if opt.Dialer == nil { opt.Dialer = NewDialer(opt) } if opt.PoolSize == 0 { opt.PoolSize = 10 * runtime.GOMAXPROCS(0) } switch opt.ReadTimeout { case -2: opt.ReadTimeout = -1 case -1: opt.ReadTimeout = 0 case 0: opt.ReadTimeout = 3 * time.Second } switch opt.WriteTimeout { case -2: opt.WriteTimeout = -1 case -1: opt.WriteTimeout = 0 case 0: opt.WriteTimeout = opt.ReadTimeout } if opt.PoolTimeout == 0 { if opt.ReadTimeout > 0 { opt.PoolTimeout = opt.ReadTimeout + time.Second } else { opt.PoolTimeout = 30 * time.Second } } if opt.ConnMaxIdleTime == 0 { opt.ConnMaxIdleTime = 30 * time.Minute } switch opt.MaxRetries { case -1: opt.MaxRetries = 0 case 0: opt.MaxRetries = 3 } switch opt.MinRetryBackoff { case -1: opt.MinRetryBackoff = 0 case 0: opt.MinRetryBackoff = 8 * time.Millisecond } switch opt.MaxRetryBackoff { case -1: opt.MaxRetryBackoff = 0 case 0: opt.MaxRetryBackoff = 512 * time.Millisecond } } func (opt *Options) clone() *Options { clone := *opt return &clone } // NewDialer returns a function that will be used as the default dialer // when none is specified in Options.Dialer. func NewDialer(opt *Options) func(context.Context, string, string) (net.Conn, error) { return func(ctx context.Context, network, addr string) (net.Conn, error) { netDialer := &net.Dialer{ Timeout: opt.DialTimeout, KeepAlive: 5 * time.Minute, } if opt.TLSConfig == nil { return netDialer.DialContext(ctx, network, addr) } return tls.DialWithDialer(netDialer, network, addr, opt.TLSConfig) } } // ParseURL parses a URL into Options that can be used to connect to Redis. // Scheme is required. // There are two connection types: by tcp socket and by unix socket. // Tcp connection: // // redis://:@:/ // // Unix connection: // // unix://:@?db= // // Most Option fields can be set using query parameters, with the following restrictions: // - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries // - only scalar type fields are supported (bool, int, time.Duration) // - for time.Duration fields, values must be a valid input for time.ParseDuration(); // additionally a plain integer as value (i.e. 
without unit) is interpreted as seconds // - to disable a duration field, use value less than or equal to 0; to use the default // value, leave the value blank or remove the parameter // - only the last value is interpreted if a parameter is given multiple times // - fields "network", "addr", "username" and "password" can only be set using other // URL attributes (scheme, host, userinfo, resp.), query parameters using these // names will be treated as unknown parameters // - unknown parameter names will result in an error // - use "skip_verify=true" to ignore TLS certificate validation // // Examples: // // redis://user:password@localhost:6789/3?dial_timeout=3&db=1&read_timeout=6s&max_retries=2 // is equivalent to: // &Options{ // Network: "tcp", // Addr: "localhost:6789", // DB: 1, // path "/3" was overridden by "&db=1" // DialTimeout: 3 * time.Second, // no time unit = seconds // ReadTimeout: 6 * time.Second, // MaxRetries: 2, // } func ParseURL(redisURL string) (*Options, error) { u, err := url.Parse(redisURL) if err != nil { return nil, err } switch u.Scheme { case "redis", "rediss": return setupTCPConn(u) case "unix": return setupUnixConn(u) default: return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme) } } func setupTCPConn(u *url.URL) (*Options, error) { o := &Options{Network: "tcp"} o.Username, o.Password = getUserPassword(u) h, p := getHostPortWithDefaults(u) o.Addr = net.JoinHostPort(h, p) f := strings.FieldsFunc(u.Path, func(r rune) bool { return r == '/' }) switch len(f) { case 0: o.DB = 0 case 1: var err error if o.DB, err = strconv.Atoi(f[0]); err != nil { return nil, fmt.Errorf("redis: invalid database number: %q", f[0]) } default: return nil, fmt.Errorf("redis: invalid URL path: %s", u.Path) } if u.Scheme == "rediss" { o.TLSConfig = &tls.Config{ ServerName: h, MinVersion: tls.VersionTLS12, } } return setupConnParams(u, o) } // getHostPortWithDefaults is a helper function that splits the url into // a host and a port. If the host is missing, it defaults to localhost // and if the port is missing, it defaults to 6379. 
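//
// Illustrative examples (assumed behavior based on the defaults described
// above), given the URL host component:
//
//	""              -> ("localhost", "6379")
//	"example.com"   -> ("example.com", "6379")
//	"10.0.0.1:7000" -> ("10.0.0.1", "7000")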
func getHostPortWithDefaults(u *url.URL) (string, string) { host, port, err := net.SplitHostPort(u.Host) if err != nil { host = u.Host } if host == "" { host = "localhost" } if port == "" { port = "6379" } return host, port } func setupUnixConn(u *url.URL) (*Options, error) { o := &Options{ Network: "unix", } if strings.TrimSpace(u.Path) == "" { // path is required with unix connection return nil, errors.New("redis: empty unix socket path") } o.Addr = u.Path o.Username, o.Password = getUserPassword(u) return setupConnParams(u, o) } type queryOptions struct { q url.Values err error } func (o *queryOptions) has(name string) bool { return len(o.q[name]) > 0 } func (o *queryOptions) string(name string) string { vs := o.q[name] if len(vs) == 0 { return "" } delete(o.q, name) // enable detection of unknown parameters return vs[len(vs)-1] } func (o *queryOptions) strings(name string) []string { vs := o.q[name] delete(o.q, name) return vs } func (o *queryOptions) int(name string) int { s := o.string(name) if s == "" { return 0 } i, err := strconv.Atoi(s) if err == nil { return i } if o.err == nil { o.err = fmt.Errorf("redis: invalid %s number: %s", name, err) } return 0 } func (o *queryOptions) duration(name string) time.Duration { s := o.string(name) if s == "" { return 0 } // try plain number first if i, err := strconv.Atoi(s); err == nil { if i <= 0 { // disable timeouts return -1 } return time.Duration(i) * time.Second } dur, err := time.ParseDuration(s) if err == nil { return dur } if o.err == nil { o.err = fmt.Errorf("redis: invalid %s duration: %w", name, err) } return 0 } func (o *queryOptions) bool(name string) bool { switch s := o.string(name); s { case "true", "1": return true case "false", "0", "": return false default: if o.err == nil { o.err = fmt.Errorf("redis: invalid %s boolean: expected true/false/1/0 or an empty string, got %q", name, s) } return false } } func (o *queryOptions) remaining() []string { if len(o.q) == 0 { return nil } keys := make([]string, 0, len(o.q)) for k := range o.q { keys = append(keys, k) } sort.Strings(keys) return keys } // setupConnParams converts query parameters in u to option value in o. func setupConnParams(u *url.URL, o *Options) (*Options, error) { q := queryOptions{q: u.Query()} // compat: a future major release may use q.int("db") if tmp := q.string("db"); tmp != "" { db, err := strconv.Atoi(tmp) if err != nil { return nil, fmt.Errorf("redis: invalid database number: %w", err) } o.DB = db } o.Protocol = q.int("protocol") o.ClientName = q.string("client_name") o.MaxRetries = q.int("max_retries") o.MinRetryBackoff = q.duration("min_retry_backoff") o.MaxRetryBackoff = q.duration("max_retry_backoff") o.DialTimeout = q.duration("dial_timeout") o.ReadTimeout = q.duration("read_timeout") o.WriteTimeout = q.duration("write_timeout") o.PoolFIFO = q.bool("pool_fifo") o.PoolSize = q.int("pool_size") o.PoolTimeout = q.duration("pool_timeout") o.MinIdleConns = q.int("min_idle_conns") o.MaxIdleConns = q.int("max_idle_conns") o.MaxActiveConns = q.int("max_active_conns") if q.has("conn_max_idle_time") { o.ConnMaxIdleTime = q.duration("conn_max_idle_time") } else { o.ConnMaxIdleTime = q.duration("idle_timeout") } if q.has("conn_max_lifetime") { o.ConnMaxLifetime = q.duration("conn_max_lifetime") } else { o.ConnMaxLifetime = q.duration("max_conn_age") } if q.err != nil { return nil, q.err } if o.TLSConfig != nil && q.has("skip_verify") { o.TLSConfig.InsecureSkipVerify = q.bool("skip_verify") } // any parameters left? 
if r := q.remaining(); len(r) > 0 { return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", ")) } return o, nil } func getUserPassword(u *url.URL) (string, string) { var user, password string if u.User != nil { user = u.User.Username() if p, ok := u.User.Password(); ok { password = p } } return user, password } func newConnPool( opt *Options, dialer func(ctx context.Context, network, addr string) (net.Conn, error), ) *pool.ConnPool { return pool.NewConnPool(&pool.Options{ Dialer: func(ctx context.Context) (net.Conn, error) { return dialer(ctx, opt.Network, opt.Addr) }, PoolFIFO: opt.PoolFIFO, PoolSize: opt.PoolSize, PoolTimeout: opt.PoolTimeout, DialTimeout: opt.DialTimeout, MinIdleConns: opt.MinIdleConns, MaxIdleConns: opt.MaxIdleConns, MaxActiveConns: opt.MaxActiveConns, ConnMaxIdleTime: opt.ConnMaxIdleTime, ConnMaxLifetime: opt.ConnMaxLifetime, }) } dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.10.0/bitmap_commands_test.go0000644000000000000000000000432015024302467026437 0ustar rootrootpackage redis_test import ( . "github.com/bsm/ginkgo/v2" . "github.com/bsm/gomega" "github.com/redis/go-redis/v9" ) type bitCountExpected struct { Start int64 End int64 Expected int64 } var _ = Describe("BitCountBite", func() { var client *redis.Client key := "bit_count_test" BeforeEach(func() { client = redis.NewClient(redisOptions()) Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred()) values := []int{0, 1, 0, 0, 1, 0, 1, 0, 1, 1} for i, v := range values { cmd := client.SetBit(ctx, key, int64(i), v) Expect(cmd.Err()).NotTo(HaveOccurred()) } }) AfterEach(func() { Expect(client.Close()).NotTo(HaveOccurred()) }) It("bit count bite", func() { var expected = []bitCountExpected{ {0, 0, 0}, {0, 1, 1}, {0, 2, 1}, {0, 3, 1}, {0, 4, 2}, {0, 5, 2}, {0, 6, 3}, {0, 7, 3}, {0, 8, 4}, {0, 9, 5}, } for _, e := range expected { cmd := client.BitCount(ctx, key, &redis.BitCount{Start: e.Start, End: e.End, Unit: redis.BitCountIndexBit}) Expect(cmd.Err()).NotTo(HaveOccurred()) Expect(cmd.Val()).To(Equal(e.Expected)) } }) }) var _ = Describe("BitCountByte", func() { var client *redis.Client key := "bit_count_test" BeforeEach(func() { client = redis.NewClient(redisOptions()) Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred()) values := []int{0, 0, 0, 0, 0, 0, 0, 1, 1, 1} for i, v := range values { cmd := client.SetBit(ctx, key, int64(i), v) Expect(cmd.Err()).NotTo(HaveOccurred()) } }) AfterEach(func() { Expect(client.Close()).NotTo(HaveOccurred()) }) It("bit count byte", func() { var expected = []bitCountExpected{ {0, 0, 1}, {0, 1, 3}, } for _, e := range expected { cmd := client.BitCount(ctx, key, &redis.BitCount{Start: e.Start, End: e.End, Unit: redis.BitCountIndexByte}) Expect(cmd.Err()).NotTo(HaveOccurred()) Expect(cmd.Val()).To(Equal(e.Expected)) } }) It("bit count byte with no unit specified", func() { var expected = []bitCountExpected{ {0, 0, 1}, {0, 1, 3}, } for _, e := range expected { cmd := client.BitCount(ctx, key, &redis.BitCount{Start: e.Start, End: e.End}) Expect(cmd.Err()).NotTo(HaveOccurred()) Expect(cmd.Val()).To(Equal(e.Expected)) } }) }) dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.10.0/LICENSE0000644000000000000000000000244615024302467022730 0ustar rootrootCopyright (c) 2013 The github.com/redis/go-redis Authors. All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.10.0/timeseries_commands_test.go0000644000000000000000000017215615024302467027351 0ustar rootrootpackage redis_test import ( "context" "fmt" "strings" . "github.com/bsm/ginkgo/v2" . "github.com/bsm/gomega" "github.com/redis/go-redis/v9" ) var _ = Describe("RedisTimeseries commands", Label("timeseries"), func() { ctx := context.TODO() setupRedisClient := func(protocolVersion int) *redis.Client { return redis.NewClient(&redis.Options{ Addr: "localhost:6379", DB: 0, Protocol: protocolVersion, UnstableResp3: true, }) } protocols := []int{2, 3} for _, protocol := range protocols { protocol := protocol // capture loop variable for each context Context(fmt.Sprintf("with protocol version %d", protocol), func() { var client *redis.Client BeforeEach(func() { client = setupRedisClient(protocol) Expect(client.FlushAll(ctx).Err()).NotTo(HaveOccurred()) }) AfterEach(func() { if client != nil { client.FlushDB(ctx) client.Close() } }) It("should TSCreate and TSCreateWithArgs", Label("timeseries", "tscreate", "tscreateWithArgs", "NonRedisEnterprise"), func() { SkipBeforeRedisVersion(7.4, "older redis stack has different results for timeseries module") result, err := client.TSCreate(ctx, "1").Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo("OK")) // Test TSCreateWithArgs opt := &redis.TSOptions{Retention: 5} result, err = client.TSCreateWithArgs(ctx, "2", opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo("OK")) opt = &redis.TSOptions{Labels: map[string]string{"Redis": "Labs"}} result, err = client.TSCreateWithArgs(ctx, "3", opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo("OK")) opt = &redis.TSOptions{Labels: map[string]string{"Time": "Series"}, Retention: 20} result, err = client.TSCreateWithArgs(ctx, "4", opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo("OK")) resultInfo, err := client.TSInfo(ctx, "4").Result() Expect(err).NotTo(HaveOccurred()) if client.Options().Protocol == 2 { Expect(resultInfo["labels"].([]interface{})[0]).To(BeEquivalentTo([]interface{}{"Time", "Series"})) } else { Expect(resultInfo["labels"].(map[interface{}]interface{})["Time"]).To(BeEquivalentTo("Series")) } // Test chunk size opt = 
&redis.TSOptions{ChunkSize: 128} result, err = client.TSCreateWithArgs(ctx, "ts-cs-1", opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo("OK")) resultInfo, err = client.TSInfo(ctx, "ts-cs-1").Result() Expect(err).NotTo(HaveOccurred()) Expect(resultInfo["chunkSize"]).To(BeEquivalentTo(128)) // Test duplicate policy duplicate_policies := []string{"BLOCK", "LAST", "FIRST", "MIN", "MAX"} for _, dup := range duplicate_policies { keyName := "ts-dup-" + dup opt = &redis.TSOptions{DuplicatePolicy: dup} result, err = client.TSCreateWithArgs(ctx, keyName, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo("OK")) resultInfo, err = client.TSInfo(ctx, keyName).Result() Expect(err).NotTo(HaveOccurred()) Expect(strings.ToUpper(resultInfo["duplicatePolicy"].(string))).To(BeEquivalentTo(dup)) } // Test insertion filters opt = &redis.TSOptions{IgnoreMaxTimeDiff: 5, DuplicatePolicy: "LAST", IgnoreMaxValDiff: 10.0} result, err = client.TSCreateWithArgs(ctx, "ts-if-1", opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo("OK")) resultAdd, err := client.TSAdd(ctx, "ts-if-1", 1000, 1.0).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultAdd).To(BeEquivalentTo(1000)) resultAdd, err = client.TSAdd(ctx, "ts-if-1", 1010, 11.0).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultAdd).To(BeEquivalentTo(1010)) resultAdd, err = client.TSAdd(ctx, "ts-if-1", 1013, 10.0).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultAdd).To(BeEquivalentTo(1010)) resultAdd, err = client.TSAdd(ctx, "ts-if-1", 1020, 11.5).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultAdd).To(BeEquivalentTo(1020)) resultAdd, err = client.TSAdd(ctx, "ts-if-1", 1021, 22.0).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultAdd).To(BeEquivalentTo(1021)) rangePoints, err := client.TSRange(ctx, "ts-if-1", 1000, 1021).Result() Expect(err).NotTo(HaveOccurred()) Expect(len(rangePoints)).To(BeEquivalentTo(4)) Expect(rangePoints).To(BeEquivalentTo([]redis.TSTimestampValue{ {Timestamp: 1000, Value: 1.0}, {Timestamp: 1010, Value: 11.0}, {Timestamp: 1020, Value: 11.5}, {Timestamp: 1021, Value: 22.0}})) // Test insertion filters with other duplicate policy opt = &redis.TSOptions{IgnoreMaxTimeDiff: 5, IgnoreMaxValDiff: 10.0} result, err = client.TSCreateWithArgs(ctx, "ts-if-2", opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo("OK")) resultAdd1, err := client.TSAdd(ctx, "ts-if-1", 1000, 1.0).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultAdd1).To(BeEquivalentTo(1000)) resultAdd1, err = client.TSAdd(ctx, "ts-if-1", 1010, 11.0).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultAdd1).To(BeEquivalentTo(1010)) resultAdd1, err = client.TSAdd(ctx, "ts-if-1", 1013, 10.0).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultAdd1).To(BeEquivalentTo(1013)) rangePoints, err = client.TSRange(ctx, "ts-if-1", 1000, 1013).Result() Expect(err).NotTo(HaveOccurred()) Expect(len(rangePoints)).To(BeEquivalentTo(3)) Expect(rangePoints).To(BeEquivalentTo([]redis.TSTimestampValue{ {Timestamp: 1000, Value: 1.0}, {Timestamp: 1010, Value: 11.0}, {Timestamp: 1013, Value: 10.0}})) }) It("should TSAdd and TSAddWithArgs", Label("timeseries", "tsadd", "tsaddWithArgs", "NonRedisEnterprise"), func() { SkipBeforeRedisVersion(7.4, "older redis stack has different results for timeseries module") result, err := client.TSAdd(ctx, "1", 1, 1).Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo(1)) // Test TSAddWithArgs opt 
:= &redis.TSOptions{Retention: 10} result, err = client.TSAddWithArgs(ctx, "2", 2, 3, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo(2)) opt = &redis.TSOptions{Labels: map[string]string{"Redis": "Labs"}} result, err = client.TSAddWithArgs(ctx, "3", 3, 2, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo(3)) opt = &redis.TSOptions{Labels: map[string]string{"Redis": "Labs", "Time": "Series"}, Retention: 10} result, err = client.TSAddWithArgs(ctx, "4", 4, 2, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo(4)) resultInfo, err := client.TSInfo(ctx, "4").Result() Expect(err).NotTo(HaveOccurred()) if client.Options().Protocol == 2 { Expect(resultInfo["labels"].([]interface{})).To(ContainElement([]interface{}{"Time", "Series"})) } else { Expect(resultInfo["labels"].(map[interface{}]interface{})["Time"]).To(BeEquivalentTo("Series")) } // Test chunk size opt = &redis.TSOptions{ChunkSize: 128} result, err = client.TSAddWithArgs(ctx, "ts-cs-1", 1, 10, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo(1)) resultInfo, err = client.TSInfo(ctx, "ts-cs-1").Result() Expect(err).NotTo(HaveOccurred()) Expect(resultInfo["chunkSize"]).To(BeEquivalentTo(128)) // Test duplicate policy // LAST opt = &redis.TSOptions{DuplicatePolicy: "LAST"} result, err = client.TSAddWithArgs(ctx, "tsal-1", 1, 5, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo(1)) result, err = client.TSAddWithArgs(ctx, "tsal-1", 1, 10, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo(1)) resultGet, err := client.TSGet(ctx, "tsal-1").Result() Expect(err).NotTo(HaveOccurred()) Expect(resultGet.Value).To(BeEquivalentTo(10)) // FIRST opt = &redis.TSOptions{DuplicatePolicy: "FIRST"} result, err = client.TSAddWithArgs(ctx, "tsaf-1", 1, 5, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo(1)) result, err = client.TSAddWithArgs(ctx, "tsaf-1", 1, 10, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo(1)) resultGet, err = client.TSGet(ctx, "tsaf-1").Result() Expect(err).NotTo(HaveOccurred()) Expect(resultGet.Value).To(BeEquivalentTo(5)) // MAX opt = &redis.TSOptions{DuplicatePolicy: "MAX"} result, err = client.TSAddWithArgs(ctx, "tsam-1", 1, 5, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo(1)) result, err = client.TSAddWithArgs(ctx, "tsam-1", 1, 10, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo(1)) resultGet, err = client.TSGet(ctx, "tsam-1").Result() Expect(err).NotTo(HaveOccurred()) Expect(resultGet.Value).To(BeEquivalentTo(10)) // MIN opt = &redis.TSOptions{DuplicatePolicy: "MIN"} result, err = client.TSAddWithArgs(ctx, "tsami-1", 1, 5, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo(1)) result, err = client.TSAddWithArgs(ctx, "tsami-1", 1, 10, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo(1)) resultGet, err = client.TSGet(ctx, "tsami-1").Result() Expect(err).NotTo(HaveOccurred()) Expect(resultGet.Value).To(BeEquivalentTo(5)) // Insertion filters opt = &redis.TSOptions{IgnoreMaxTimeDiff: 5, IgnoreMaxValDiff: 10.0, DuplicatePolicy: "LAST"} result, err = client.TSAddWithArgs(ctx, "ts-if-1", 1000, 1.0, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo(1000)) result, err = client.TSAddWithArgs(ctx, "ts-if-1", 1004, 3.0, opt).Result() 
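// Note (added for clarity, not in the upstream source): with
// IgnoreMaxTimeDiff=5 and IgnoreMaxValDiff=10 under the LAST policy, this
// sample (timestamp 1004, value 3.0) differs from the last stored sample
// (1000, 1.0) by only 4 in time and 2.0 in value, so the server ignores it
// and the command returns the existing timestamp 1000, as asserted below.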
Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo(1000)) rangePoints, err := client.TSRange(ctx, "ts-if-1", 1000, 1004).Result() Expect(err).NotTo(HaveOccurred()) Expect(len(rangePoints)).To(BeEquivalentTo(1)) Expect(rangePoints).To(BeEquivalentTo([]redis.TSTimestampValue{{Timestamp: 1000, Value: 1.0}})) }) It("should TSAlter", Label("timeseries", "tsalter", "NonRedisEnterprise"), func() { SkipBeforeRedisVersion(7.4, "older redis stack has different results for timeseries module") result, err := client.TSCreate(ctx, "1").Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo("OK")) resultInfo, err := client.TSInfo(ctx, "1").Result() Expect(err).NotTo(HaveOccurred()) Expect(resultInfo["retentionTime"]).To(BeEquivalentTo(0)) opt := &redis.TSAlterOptions{Retention: 10} resultAlter, err := client.TSAlter(ctx, "1", opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultAlter).To(BeEquivalentTo("OK")) resultInfo, err = client.TSInfo(ctx, "1").Result() Expect(err).NotTo(HaveOccurred()) Expect(resultInfo["retentionTime"]).To(BeEquivalentTo(10)) resultInfo, err = client.TSInfo(ctx, "1").Result() Expect(err).NotTo(HaveOccurred()) if client.Options().Protocol == 2 { Expect(resultInfo["labels"]).To(BeEquivalentTo([]interface{}{})) } else { Expect(resultInfo["labels"]).To(BeEquivalentTo(map[interface{}]interface{}{})) } opt = &redis.TSAlterOptions{Labels: map[string]string{"Time": "Series"}} resultAlter, err = client.TSAlter(ctx, "1", opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultAlter).To(BeEquivalentTo("OK")) resultInfo, err = client.TSInfo(ctx, "1").Result() Expect(err).NotTo(HaveOccurred()) if client.Options().Protocol == 2 { Expect(resultInfo["labels"].([]interface{})[0]).To(BeEquivalentTo([]interface{}{"Time", "Series"})) Expect(resultInfo["retentionTime"]).To(BeEquivalentTo(10)) if RedisVersion >= 8 { Expect(resultInfo["duplicatePolicy"]).To(BeEquivalentTo("block")) } else { // Older versions of Redis had a bug where the duplicate policy was not set correctly Expect(resultInfo["duplicatePolicy"]).To(BeEquivalentTo(redis.Nil)) } } else { Expect(resultInfo["labels"].(map[interface{}]interface{})["Time"]).To(BeEquivalentTo("Series")) Expect(resultInfo["retentionTime"]).To(BeEquivalentTo(10)) if RedisVersion >= 8 { Expect(resultInfo["duplicatePolicy"]).To(BeEquivalentTo("block")) } else { // Older versions of Redis had a bug where the duplicate policy was not set correctly Expect(resultInfo["duplicatePolicy"]).To(BeEquivalentTo(redis.Nil)) } } opt = &redis.TSAlterOptions{DuplicatePolicy: "min"} resultAlter, err = client.TSAlter(ctx, "1", opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultAlter).To(BeEquivalentTo("OK")) resultInfo, err = client.TSInfo(ctx, "1").Result() Expect(err).NotTo(HaveOccurred()) Expect(resultInfo["duplicatePolicy"]).To(BeEquivalentTo("min")) // Test insertion filters resultAdd, err := client.TSAdd(ctx, "ts-if-1", 1000, 1.0).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultAdd).To(BeEquivalentTo(1000)) resultAdd, err = client.TSAdd(ctx, "ts-if-1", 1010, 11.0).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultAdd).To(BeEquivalentTo(1010)) resultAdd, err = client.TSAdd(ctx, "ts-if-1", 1013, 10.0).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultAdd).To(BeEquivalentTo(1013)) alterOpt := &redis.TSAlterOptions{IgnoreMaxTimeDiff: 5, IgnoreMaxValDiff: 10.0, DuplicatePolicy: "LAST"} resultAlter, err = client.TSAlter(ctx, "ts-if-1", alterOpt).Result() Expect(err).NotTo(HaveOccurred()) 
Expect(resultAlter).To(BeEquivalentTo("OK")) resultAdd, err = client.TSAdd(ctx, "ts-if-1", 1015, 11.5).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultAdd).To(BeEquivalentTo(1013)) rangePoints, err := client.TSRange(ctx, "ts-if-1", 1000, 1013).Result() Expect(err).NotTo(HaveOccurred()) Expect(len(rangePoints)).To(BeEquivalentTo(3)) Expect(rangePoints).To(BeEquivalentTo([]redis.TSTimestampValue{ {Timestamp: 1000, Value: 1.0}, {Timestamp: 1010, Value: 11.0}, {Timestamp: 1013, Value: 10.0}})) }) It("should TSCreateRule and TSDeleteRule", Label("timeseries", "tscreaterule", "tsdeleterule"), func() { result, err := client.TSCreate(ctx, "1").Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo("OK")) result, err = client.TSCreate(ctx, "2").Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo("OK")) result, err = client.TSCreateRule(ctx, "1", "2", redis.Avg, 100).Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo("OK")) for i := 0; i < 50; i++ { resultAdd, err := client.TSAdd(ctx, "1", 100+i*2, 1).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultAdd).To(BeEquivalentTo(100 + i*2)) resultAdd, err = client.TSAdd(ctx, "1", 100+i*2+1, 2).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultAdd).To(BeEquivalentTo(100 + i*2 + 1)) } resultAdd, err := client.TSAdd(ctx, "1", 100*2, 1.5).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultAdd).To(BeEquivalentTo(100 * 2)) resultGet, err := client.TSGet(ctx, "2").Result() Expect(err).NotTo(HaveOccurred()) Expect(resultGet.Value).To(BeEquivalentTo(1.5)) Expect(resultGet.Timestamp).To(BeEquivalentTo(100)) resultDeleteRule, err := client.TSDeleteRule(ctx, "1", "2").Result() Expect(err).NotTo(HaveOccurred()) Expect(resultDeleteRule).To(BeEquivalentTo("OK")) resultInfo, err := client.TSInfo(ctx, "1").Result() Expect(err).NotTo(HaveOccurred()) if client.Options().Protocol == 2 { Expect(resultInfo["rules"]).To(BeEquivalentTo([]interface{}{})) } else { Expect(resultInfo["rules"]).To(BeEquivalentTo(map[interface{}]interface{}{})) } }) It("should TSIncrBy, TSIncrByWithArgs, TSDecrBy and TSDecrByWithArgs", Label("timeseries", "tsincrby", "tsdecrby", "tsincrbyWithArgs", "tsdecrbyWithArgs", "NonRedisEnterprise"), func() { SkipBeforeRedisVersion(7.4, "older redis stack has different results for timeseries module") for i := 0; i < 100; i++ { _, err := client.TSIncrBy(ctx, "1", 1).Result() Expect(err).NotTo(HaveOccurred()) } result, err := client.TSGet(ctx, "1").Result() Expect(err).NotTo(HaveOccurred()) Expect(result.Value).To(BeEquivalentTo(100)) for i := 0; i < 100; i++ { _, err := client.TSDecrBy(ctx, "1", 1).Result() Expect(err).NotTo(HaveOccurred()) } result, err = client.TSGet(ctx, "1").Result() Expect(err).NotTo(HaveOccurred()) Expect(result.Value).To(BeEquivalentTo(0)) opt := &redis.TSIncrDecrOptions{Timestamp: 5} _, err = client.TSIncrByWithArgs(ctx, "2", 1.5, opt).Result() Expect(err).NotTo(HaveOccurred()) result, err = client.TSGet(ctx, "2").Result() Expect(err).NotTo(HaveOccurred()) Expect(result.Timestamp).To(BeEquivalentTo(5)) Expect(result.Value).To(BeEquivalentTo(1.5)) opt = &redis.TSIncrDecrOptions{Timestamp: 7} _, err = client.TSIncrByWithArgs(ctx, "2", 2.25, opt).Result() Expect(err).NotTo(HaveOccurred()) result, err = client.TSGet(ctx, "2").Result() Expect(err).NotTo(HaveOccurred()) Expect(result.Timestamp).To(BeEquivalentTo(7)) Expect(result.Value).To(BeEquivalentTo(3.75)) opt = &redis.TSIncrDecrOptions{Timestamp: 15} _, err = client.TSDecrByWithArgs(ctx, "2", 
1.5, opt).Result() Expect(err).NotTo(HaveOccurred()) result, err = client.TSGet(ctx, "2").Result() Expect(err).NotTo(HaveOccurred()) Expect(result.Timestamp).To(BeEquivalentTo(15)) Expect(result.Value).To(BeEquivalentTo(2.25)) // Test chunk size INCRBY opt = &redis.TSIncrDecrOptions{ChunkSize: 128} _, err = client.TSIncrByWithArgs(ctx, "3", 10, opt).Result() Expect(err).NotTo(HaveOccurred()) resultInfo, err := client.TSInfo(ctx, "3").Result() Expect(err).NotTo(HaveOccurred()) Expect(resultInfo["chunkSize"]).To(BeEquivalentTo(128)) // Test chunk size DECRBY opt = &redis.TSIncrDecrOptions{ChunkSize: 128} _, err = client.TSDecrByWithArgs(ctx, "4", 10, opt).Result() Expect(err).NotTo(HaveOccurred()) resultInfo, err = client.TSInfo(ctx, "4").Result() Expect(err).NotTo(HaveOccurred()) Expect(resultInfo["chunkSize"]).To(BeEquivalentTo(128)) // Test insertion filters INCRBY opt = &redis.TSIncrDecrOptions{Timestamp: 1000, IgnoreMaxTimeDiff: 5, IgnoreMaxValDiff: 10.0, DuplicatePolicy: "LAST"} res, err := client.TSIncrByWithArgs(ctx, "ts-if-1", 1.0, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(BeEquivalentTo(1000)) res, err = client.TSIncrByWithArgs(ctx, "ts-if-1", 3.0, &redis.TSIncrDecrOptions{Timestamp: 1000}).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(BeEquivalentTo(1000)) rangePoints, err := client.TSRange(ctx, "ts-if-1", 1000, 1004).Result() Expect(err).NotTo(HaveOccurred()) Expect(len(rangePoints)).To(BeEquivalentTo(1)) Expect(rangePoints).To(BeEquivalentTo([]redis.TSTimestampValue{{Timestamp: 1000, Value: 1.0}})) res, err = client.TSIncrByWithArgs(ctx, "ts-if-1", 10.1, &redis.TSIncrDecrOptions{Timestamp: 1000}).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(BeEquivalentTo(1000)) rangePoints, err = client.TSRange(ctx, "ts-if-1", 1000, 1004).Result() Expect(err).NotTo(HaveOccurred()) Expect(len(rangePoints)).To(BeEquivalentTo(1)) Expect(rangePoints).To(BeEquivalentTo([]redis.TSTimestampValue{{Timestamp: 1000, Value: 11.1}})) // Test insertion filters DECRBY opt = &redis.TSIncrDecrOptions{Timestamp: 1000, IgnoreMaxTimeDiff: 5, IgnoreMaxValDiff: 10.0, DuplicatePolicy: "LAST"} res, err = client.TSDecrByWithArgs(ctx, "ts-if-2", 1.0, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(BeEquivalentTo(1000)) res, err = client.TSDecrByWithArgs(ctx, "ts-if-2", 3.0, &redis.TSIncrDecrOptions{Timestamp: 1000}).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(BeEquivalentTo(1000)) rangePoints, err = client.TSRange(ctx, "ts-if-2", 1000, 1004).Result() Expect(err).NotTo(HaveOccurred()) Expect(len(rangePoints)).To(BeEquivalentTo(1)) Expect(rangePoints).To(BeEquivalentTo([]redis.TSTimestampValue{{Timestamp: 1000, Value: -1.0}})) res, err = client.TSDecrByWithArgs(ctx, "ts-if-2", 10.1, &redis.TSIncrDecrOptions{Timestamp: 1000}).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(BeEquivalentTo(1000)) rangePoints, err = client.TSRange(ctx, "ts-if-2", 1000, 1004).Result() Expect(err).NotTo(HaveOccurred()) Expect(len(rangePoints)).To(BeEquivalentTo(1)) Expect(rangePoints).To(BeEquivalentTo([]redis.TSTimestampValue{{Timestamp: 1000, Value: -11.1}})) }) It("should TSGet", Label("timeseries", "tsget"), func() { opt := &redis.TSOptions{DuplicatePolicy: "max"} resultGet, err := client.TSAddWithArgs(ctx, "foo", 2265985, 151, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultGet).To(BeEquivalentTo(2265985)) result, err := client.TSGet(ctx, "foo").Result() Expect(err).NotTo(HaveOccurred()) Expect(result.Timestamp).To(BeEquivalentTo(2265985)) 
Expect(result.Value).To(BeEquivalentTo(151)) }) It("should TSGet Latest", Label("timeseries", "tsgetlatest", "NonRedisEnterprise"), func() { resultGet, err := client.TSCreate(ctx, "tsgl-1").Result() Expect(err).NotTo(HaveOccurred()) Expect(resultGet).To(BeEquivalentTo("OK")) resultGet, err = client.TSCreate(ctx, "tsgl-2").Result() Expect(err).NotTo(HaveOccurred()) Expect(resultGet).To(BeEquivalentTo("OK")) resultGet, err = client.TSCreateRule(ctx, "tsgl-1", "tsgl-2", redis.Sum, 10).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultGet).To(BeEquivalentTo("OK")) _, err = client.TSAdd(ctx, "tsgl-1", 1, 1).Result() Expect(err).NotTo(HaveOccurred()) _, err = client.TSAdd(ctx, "tsgl-1", 2, 3).Result() Expect(err).NotTo(HaveOccurred()) _, err = client.TSAdd(ctx, "tsgl-1", 11, 7).Result() Expect(err).NotTo(HaveOccurred()) _, err = client.TSAdd(ctx, "tsgl-1", 13, 1).Result() Expect(err).NotTo(HaveOccurred()) result, errGet := client.TSGet(ctx, "tsgl-2").Result() Expect(errGet).NotTo(HaveOccurred()) Expect(result.Timestamp).To(BeEquivalentTo(0)) Expect(result.Value).To(BeEquivalentTo(4)) result, errGet = client.TSGetWithArgs(ctx, "tsgl-2", &redis.TSGetOptions{Latest: true}).Result() Expect(errGet).NotTo(HaveOccurred()) Expect(result.Timestamp).To(BeEquivalentTo(10)) Expect(result.Value).To(BeEquivalentTo(8)) }) It("should TSInfo", Label("timeseries", "tsinfo"), func() { resultGet, err := client.TSAdd(ctx, "foo", 2265985, 151).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultGet).To(BeEquivalentTo(2265985)) result, err := client.TSInfo(ctx, "foo").Result() Expect(err).NotTo(HaveOccurred()) Expect(result["firstTimestamp"]).To(BeEquivalentTo(2265985)) }) It("should TSMAdd", Label("timeseries", "tsmadd"), func() { resultGet, err := client.TSCreate(ctx, "a").Result() Expect(err).NotTo(HaveOccurred()) Expect(resultGet).To(BeEquivalentTo("OK")) ktvSlices := make([][]interface{}, 3) for i := 0; i < 3; i++ { ktvSlices[i] = make([]interface{}, 3) ktvSlices[i][0] = "a" for j := 1; j < 3; j++ { ktvSlices[i][j] = (i + j) * j } } result, err := client.TSMAdd(ctx, ktvSlices).Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo([]int64{1, 2, 3})) }) It("should TSMGet and TSMGetWithArgs", Label("timeseries", "tsmget", "tsmgetWithArgs", "NonRedisEnterprise"), func() { opt := &redis.TSOptions{Labels: map[string]string{"Test": "This"}} resultCreate, err := client.TSCreateWithArgs(ctx, "a", opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultCreate).To(BeEquivalentTo("OK")) opt = &redis.TSOptions{Labels: map[string]string{"Test": "This", "Taste": "That"}} resultCreate, err = client.TSCreateWithArgs(ctx, "b", opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultCreate).To(BeEquivalentTo("OK")) _, err = client.TSAdd(ctx, "a", "*", 15).Result() Expect(err).NotTo(HaveOccurred()) _, err = client.TSAdd(ctx, "b", "*", 25).Result() Expect(err).NotTo(HaveOccurred()) result, err := client.TSMGet(ctx, []string{"Test=This"}).Result() Expect(err).NotTo(HaveOccurred()) if client.Options().Protocol == 2 { Expect(result["a"][1].([]interface{})[1]).To(BeEquivalentTo("15")) Expect(result["b"][1].([]interface{})[1]).To(BeEquivalentTo("25")) } else { Expect(result["a"][1].([]interface{})[1]).To(BeEquivalentTo(15)) Expect(result["b"][1].([]interface{})[1]).To(BeEquivalentTo(25)) } mgetOpt := &redis.TSMGetOptions{WithLabels: true} result, err = client.TSMGetWithArgs(ctx, []string{"Test=This"}, mgetOpt).Result() Expect(err).NotTo(HaveOccurred()) if client.Options().Protocol == 2 { 
Expect(result["b"][0]).To(ConsistOf([]interface{}{"Test", "This"}, []interface{}{"Taste", "That"})) } else { Expect(result["b"][0]).To(BeEquivalentTo(map[interface{}]interface{}{"Test": "This", "Taste": "That"})) } resultCreate, err = client.TSCreate(ctx, "c").Result() Expect(err).NotTo(HaveOccurred()) Expect(resultCreate).To(BeEquivalentTo("OK")) opt = &redis.TSOptions{Labels: map[string]string{"is_compaction": "true"}} resultCreate, err = client.TSCreateWithArgs(ctx, "d", opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultCreate).To(BeEquivalentTo("OK")) resultCreateRule, err := client.TSCreateRule(ctx, "c", "d", redis.Sum, 10).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultCreateRule).To(BeEquivalentTo("OK")) _, err = client.TSAdd(ctx, "c", 1, 1).Result() Expect(err).NotTo(HaveOccurred()) _, err = client.TSAdd(ctx, "c", 2, 3).Result() Expect(err).NotTo(HaveOccurred()) _, err = client.TSAdd(ctx, "c", 11, 7).Result() Expect(err).NotTo(HaveOccurred()) _, err = client.TSAdd(ctx, "c", 13, 1).Result() Expect(err).NotTo(HaveOccurred()) result, err = client.TSMGet(ctx, []string{"is_compaction=true"}).Result() Expect(err).NotTo(HaveOccurred()) if client.Options().Protocol == 2 { Expect(result["d"][1]).To(BeEquivalentTo([]interface{}{int64(0), "4"})) } else { Expect(result["d"][1]).To(BeEquivalentTo([]interface{}{int64(0), 4.0})) } mgetOpt = &redis.TSMGetOptions{Latest: true} result, err = client.TSMGetWithArgs(ctx, []string{"is_compaction=true"}, mgetOpt).Result() Expect(err).NotTo(HaveOccurred()) if client.Options().Protocol == 2 { Expect(result["d"][1]).To(BeEquivalentTo([]interface{}{int64(10), "8"})) } else { Expect(result["d"][1]).To(BeEquivalentTo([]interface{}{int64(10), 8.0})) } }) It("should TSQueryIndex", Label("timeseries", "tsqueryindex"), func() { opt := &redis.TSOptions{Labels: map[string]string{"Test": "This"}} resultCreate, err := client.TSCreateWithArgs(ctx, "a", opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultCreate).To(BeEquivalentTo("OK")) opt = &redis.TSOptions{Labels: map[string]string{"Test": "This", "Taste": "That"}} resultCreate, err = client.TSCreateWithArgs(ctx, "b", opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultCreate).To(BeEquivalentTo("OK")) result, err := client.TSQueryIndex(ctx, []string{"Test=This"}).Result() Expect(err).NotTo(HaveOccurred()) Expect(len(result)).To(BeEquivalentTo(2)) result, err = client.TSQueryIndex(ctx, []string{"Taste=That"}).Result() Expect(err).NotTo(HaveOccurred()) Expect(len(result)).To(BeEquivalentTo(1)) }) It("should TSDel and TSRange", Label("timeseries", "tsdel", "tsrange"), func() { for i := 0; i < 100; i++ { _, err := client.TSAdd(ctx, "a", i, float64(i%7)).Result() Expect(err).NotTo(HaveOccurred()) } resultDelete, err := client.TSDel(ctx, "a", 0, 21).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultDelete).To(BeEquivalentTo(22)) resultRange, err := client.TSRange(ctx, "a", 0, 21).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultRange).To(BeEquivalentTo([]redis.TSTimestampValue{})) resultRange, err = client.TSRange(ctx, "a", 22, 22).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 22, Value: 1})) }) It("should TSRange, TSRangeWithArgs", Label("timeseries", "tsrange", "tsrangeWithArgs", "NonRedisEnterprise"), func() { for i := 0; i < 100; i++ { _, err := client.TSAdd(ctx, "a", i, float64(i%7)).Result() Expect(err).NotTo(HaveOccurred()) } result, err := client.TSRange(ctx, "a", 0, 200).Result() 
Expect(err).NotTo(HaveOccurred()) Expect(len(result)).To(BeEquivalentTo(100)) for i := 0; i < 100; i++ { client.TSAdd(ctx, "a", i+200, float64(i%7)) } result, err = client.TSRange(ctx, "a", 0, 500).Result() Expect(err).NotTo(HaveOccurred()) Expect(len(result)).To(BeEquivalentTo(200)) fts := make([]int, 0) for i := 10; i < 20; i++ { fts = append(fts, i) } opt := &redis.TSRangeOptions{FilterByTS: fts, FilterByValue: []int{1, 2}} result, err = client.TSRangeWithArgs(ctx, "a", 0, 500, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(len(result)).To(BeEquivalentTo(2)) opt = &redis.TSRangeOptions{Aggregator: redis.Count, BucketDuration: 10, Align: "+"} result, err = client.TSRangeWithArgs(ctx, "a", 0, 10, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo([]redis.TSTimestampValue{{Timestamp: 0, Value: 10}, {Timestamp: 10, Value: 1}})) opt = &redis.TSRangeOptions{Aggregator: redis.Count, BucketDuration: 10, Align: "5"} result, err = client.TSRangeWithArgs(ctx, "a", 0, 10, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo([]redis.TSTimestampValue{{Timestamp: 0, Value: 5}, {Timestamp: 5, Value: 6}})) opt = &redis.TSRangeOptions{Aggregator: redis.Twa, BucketDuration: 10} result, err = client.TSRangeWithArgs(ctx, "a", 0, 10, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo([]redis.TSTimestampValue{{Timestamp: 0, Value: 2.55}, {Timestamp: 10, Value: 3}})) // Test Range Latest resultCreate, err := client.TSCreate(ctx, "t1").Result() Expect(err).NotTo(HaveOccurred()) Expect(resultCreate).To(BeEquivalentTo("OK")) resultCreate, err = client.TSCreate(ctx, "t2").Result() Expect(err).NotTo(HaveOccurred()) Expect(resultCreate).To(BeEquivalentTo("OK")) resultRule, err := client.TSCreateRule(ctx, "t1", "t2", redis.Sum, 10).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultRule).To(BeEquivalentTo("OK")) _, errAdd := client.TSAdd(ctx, "t1", 1, 1).Result() Expect(errAdd).NotTo(HaveOccurred()) _, errAdd = client.TSAdd(ctx, "t1", 2, 3).Result() Expect(errAdd).NotTo(HaveOccurred()) _, errAdd = client.TSAdd(ctx, "t1", 11, 7).Result() Expect(errAdd).NotTo(HaveOccurred()) _, errAdd = client.TSAdd(ctx, "t1", 13, 1).Result() Expect(errAdd).NotTo(HaveOccurred()) resultRange, err := client.TSRange(ctx, "t1", 0, 20).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 1, Value: 1})) opt = &redis.TSRangeOptions{Latest: true} resultRange, err = client.TSRangeWithArgs(ctx, "t2", 0, 10, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 0, Value: 4})) // Test Bucket Timestamp resultCreate, err = client.TSCreate(ctx, "t3").Result() Expect(err).NotTo(HaveOccurred()) Expect(resultCreate).To(BeEquivalentTo("OK")) _, errAdd = client.TSAdd(ctx, "t3", 15, 1).Result() Expect(errAdd).NotTo(HaveOccurred()) _, errAdd = client.TSAdd(ctx, "t3", 17, 4).Result() Expect(errAdd).NotTo(HaveOccurred()) _, errAdd = client.TSAdd(ctx, "t3", 51, 3).Result() Expect(errAdd).NotTo(HaveOccurred()) _, errAdd = client.TSAdd(ctx, "t3", 73, 5).Result() Expect(errAdd).NotTo(HaveOccurred()) _, errAdd = client.TSAdd(ctx, "t3", 75, 3).Result() Expect(errAdd).NotTo(HaveOccurred()) opt = &redis.TSRangeOptions{Aggregator: redis.Max, Align: 0, BucketDuration: 10} resultRange, err = client.TSRangeWithArgs(ctx, "t3", 0, 100, opt).Result() Expect(err).NotTo(HaveOccurred()) 
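// Note (added for clarity, not in the upstream source): with Align 0 and
// BucketDuration 10, the samples at 15/17, 51 and 73/75 fall into three
// buckets. By default each bucket is reported at its start timestamp
// (10, 50, 70); the BucketTimestamp "+" variant below reports the same
// buckets at their end timestamps (20, 60, 80).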
Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 10, Value: 4})) Expect(len(resultRange)).To(BeEquivalentTo(3)) opt = &redis.TSRangeOptions{Aggregator: redis.Max, Align: 0, BucketDuration: 10, BucketTimestamp: "+"} resultRange, err = client.TSRangeWithArgs(ctx, "t3", 0, 100, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 20, Value: 4})) Expect(len(resultRange)).To(BeEquivalentTo(3)) // Test Empty _, errAdd = client.TSAdd(ctx, "t4", 15, 1).Result() Expect(errAdd).NotTo(HaveOccurred()) _, errAdd = client.TSAdd(ctx, "t4", 17, 4).Result() Expect(errAdd).NotTo(HaveOccurred()) _, errAdd = client.TSAdd(ctx, "t4", 51, 3).Result() Expect(errAdd).NotTo(HaveOccurred()) _, errAdd = client.TSAdd(ctx, "t4", 73, 5).Result() Expect(errAdd).NotTo(HaveOccurred()) _, errAdd = client.TSAdd(ctx, "t4", 75, 3).Result() Expect(errAdd).NotTo(HaveOccurred()) opt = &redis.TSRangeOptions{Aggregator: redis.Max, Align: 0, BucketDuration: 10} resultRange, err = client.TSRangeWithArgs(ctx, "t4", 0, 100, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 10, Value: 4})) Expect(len(resultRange)).To(BeEquivalentTo(3)) opt = &redis.TSRangeOptions{Aggregator: redis.Max, Align: 0, BucketDuration: 10, Empty: true} resultRange, err = client.TSRangeWithArgs(ctx, "t4", 0, 100, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 10, Value: 4})) Expect(len(resultRange)).To(BeEquivalentTo(7)) }) It("should TSRevRange, TSRevRangeWithArgs", Label("timeseries", "tsrevrange", "tsrevrangeWithArgs", "NonRedisEnterprise"), func() { for i := 0; i < 100; i++ { _, err := client.TSAdd(ctx, "a", i, float64(i%7)).Result() Expect(err).NotTo(HaveOccurred()) } result, err := client.TSRange(ctx, "a", 0, 200).Result() Expect(err).NotTo(HaveOccurred()) Expect(len(result)).To(BeEquivalentTo(100)) for i := 0; i < 100; i++ { client.TSAdd(ctx, "a", i+200, float64(i%7)) } result, err = client.TSRange(ctx, "a", 0, 500).Result() Expect(err).NotTo(HaveOccurred()) Expect(len(result)).To(BeEquivalentTo(200)) opt := &redis.TSRevRangeOptions{Aggregator: redis.Avg, BucketDuration: 10} result, err = client.TSRevRangeWithArgs(ctx, "a", 0, 500, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(len(result)).To(BeEquivalentTo(20)) opt = &redis.TSRevRangeOptions{Count: 10} result, err = client.TSRevRangeWithArgs(ctx, "a", 0, 500, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(len(result)).To(BeEquivalentTo(10)) fts := make([]int, 0) for i := 10; i < 20; i++ { fts = append(fts, i) } opt = &redis.TSRevRangeOptions{FilterByTS: fts, FilterByValue: []int{1, 2}} result, err = client.TSRevRangeWithArgs(ctx, "a", 0, 500, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(len(result)).To(BeEquivalentTo(2)) opt = &redis.TSRevRangeOptions{Aggregator: redis.Count, BucketDuration: 10, Align: "+"} result, err = client.TSRevRangeWithArgs(ctx, "a", 0, 10, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo([]redis.TSTimestampValue{{Timestamp: 10, Value: 1}, {Timestamp: 0, Value: 10}})) opt = &redis.TSRevRangeOptions{Aggregator: redis.Count, BucketDuration: 10, Align: "1"} result, err = client.TSRevRangeWithArgs(ctx, "a", 0, 10, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo([]redis.TSTimestampValue{{Timestamp: 1, Value: 10}, {Timestamp: 0, Value: 1}})) opt = 
&redis.TSRevRangeOptions{Aggregator: redis.Twa, BucketDuration: 10} result, err = client.TSRevRangeWithArgs(ctx, "a", 0, 10, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo([]redis.TSTimestampValue{{Timestamp: 10, Value: 3}, {Timestamp: 0, Value: 2.55}})) // Test Range Latest resultCreate, err := client.TSCreate(ctx, "t1").Result() Expect(err).NotTo(HaveOccurred()) Expect(resultCreate).To(BeEquivalentTo("OK")) resultCreate, err = client.TSCreate(ctx, "t2").Result() Expect(err).NotTo(HaveOccurred()) Expect(resultCreate).To(BeEquivalentTo("OK")) resultRule, err := client.TSCreateRule(ctx, "t1", "t2", redis.Sum, 10).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultRule).To(BeEquivalentTo("OK")) _, errAdd := client.TSAdd(ctx, "t1", 1, 1).Result() Expect(errAdd).NotTo(HaveOccurred()) _, errAdd = client.TSAdd(ctx, "t1", 2, 3).Result() Expect(errAdd).NotTo(HaveOccurred()) _, errAdd = client.TSAdd(ctx, "t1", 11, 7).Result() Expect(errAdd).NotTo(HaveOccurred()) _, errAdd = client.TSAdd(ctx, "t1", 13, 1).Result() Expect(errAdd).NotTo(HaveOccurred()) resultRange, err := client.TSRange(ctx, "t2", 0, 10).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 0, Value: 4})) opt = &redis.TSRevRangeOptions{Latest: true} resultRange, err = client.TSRevRangeWithArgs(ctx, "t2", 0, 10, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 10, Value: 8})) resultRange, err = client.TSRevRangeWithArgs(ctx, "t2", 0, 9, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 0, Value: 4})) // Test Bucket Timestamp resultCreate, err = client.TSCreate(ctx, "t3").Result() Expect(err).NotTo(HaveOccurred()) Expect(resultCreate).To(BeEquivalentTo("OK")) _, errAdd = client.TSAdd(ctx, "t3", 15, 1).Result() Expect(errAdd).NotTo(HaveOccurred()) _, errAdd = client.TSAdd(ctx, "t3", 17, 4).Result() Expect(errAdd).NotTo(HaveOccurred()) _, errAdd = client.TSAdd(ctx, "t3", 51, 3).Result() Expect(errAdd).NotTo(HaveOccurred()) _, errAdd = client.TSAdd(ctx, "t3", 73, 5).Result() Expect(errAdd).NotTo(HaveOccurred()) _, errAdd = client.TSAdd(ctx, "t3", 75, 3).Result() Expect(errAdd).NotTo(HaveOccurred()) opt = &redis.TSRevRangeOptions{Aggregator: redis.Max, Align: 0, BucketDuration: 10} resultRange, err = client.TSRevRangeWithArgs(ctx, "t3", 0, 100, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 70, Value: 5})) Expect(len(resultRange)).To(BeEquivalentTo(3)) opt = &redis.TSRevRangeOptions{Aggregator: redis.Max, Align: 0, BucketDuration: 10, BucketTimestamp: "+"} resultRange, err = client.TSRevRangeWithArgs(ctx, "t3", 0, 100, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 80, Value: 5})) Expect(len(resultRange)).To(BeEquivalentTo(3)) // Test Empty _, errAdd = client.TSAdd(ctx, "t4", 15, 1).Result() Expect(errAdd).NotTo(HaveOccurred()) _, errAdd = client.TSAdd(ctx, "t4", 17, 4).Result() Expect(errAdd).NotTo(HaveOccurred()) _, errAdd = client.TSAdd(ctx, "t4", 51, 3).Result() Expect(errAdd).NotTo(HaveOccurred()) _, errAdd = client.TSAdd(ctx, "t4", 73, 5).Result() Expect(errAdd).NotTo(HaveOccurred()) _, errAdd = client.TSAdd(ctx, "t4", 75, 3).Result() Expect(errAdd).NotTo(HaveOccurred()) opt = &redis.TSRevRangeOptions{Aggregator: redis.Max, Align: 0, 
BucketDuration: 10} resultRange, err = client.TSRevRangeWithArgs(ctx, "t4", 0, 100, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 70, Value: 5})) Expect(len(resultRange)).To(BeEquivalentTo(3)) opt = &redis.TSRevRangeOptions{Aggregator: redis.Max, Align: 0, BucketDuration: 10, Empty: true} resultRange, err = client.TSRevRangeWithArgs(ctx, "t4", 0, 100, opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultRange[0]).To(BeEquivalentTo(redis.TSTimestampValue{Timestamp: 70, Value: 5})) Expect(len(resultRange)).To(BeEquivalentTo(7)) }) It("should TSMRange and TSMRangeWithArgs", Label("timeseries", "tsmrange", "tsmrangeWithArgs"), func() { createOpt := &redis.TSOptions{Labels: map[string]string{"Test": "This", "team": "ny"}} resultCreate, err := client.TSCreateWithArgs(ctx, "a", createOpt).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultCreate).To(BeEquivalentTo("OK")) createOpt = &redis.TSOptions{Labels: map[string]string{"Test": "This", "Taste": "That", "team": "sf"}} resultCreate, err = client.TSCreateWithArgs(ctx, "b", createOpt).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultCreate).To(BeEquivalentTo("OK")) for i := 0; i < 100; i++ { _, err := client.TSAdd(ctx, "a", i, float64(i%7)).Result() Expect(err).NotTo(HaveOccurred()) _, err = client.TSAdd(ctx, "b", i, float64(i%11)).Result() Expect(err).NotTo(HaveOccurred()) } result, err := client.TSMRange(ctx, 0, 200, []string{"Test=This"}).Result() Expect(err).NotTo(HaveOccurred()) Expect(len(result)).To(BeEquivalentTo(2)) if client.Options().Protocol == 2 { Expect(len(result["a"][1].([]interface{}))).To(BeEquivalentTo(100)) } else { Expect(len(result["a"][2].([]interface{}))).To(BeEquivalentTo(100)) } // Test Count mrangeOpt := &redis.TSMRangeOptions{Count: 10} result, err = client.TSMRangeWithArgs(ctx, 0, 200, []string{"Test=This"}, mrangeOpt).Result() Expect(err).NotTo(HaveOccurred()) if client.Options().Protocol == 2 { Expect(len(result["a"][1].([]interface{}))).To(BeEquivalentTo(10)) } else { Expect(len(result["a"][2].([]interface{}))).To(BeEquivalentTo(10)) } // Test Aggregation and BucketDuration for i := 0; i < 100; i++ { _, err := client.TSAdd(ctx, "a", i+200, float64(i%7)).Result() Expect(err).NotTo(HaveOccurred()) } mrangeOpt = &redis.TSMRangeOptions{Aggregator: redis.Avg, BucketDuration: 10} result, err = client.TSMRangeWithArgs(ctx, 0, 500, []string{"Test=This"}, mrangeOpt).Result() Expect(err).NotTo(HaveOccurred()) Expect(len(result)).To(BeEquivalentTo(2)) if client.Options().Protocol == 2 { Expect(len(result["a"][1].([]interface{}))).To(BeEquivalentTo(20)) } else { Expect(len(result["a"][2].([]interface{}))).To(BeEquivalentTo(20)) } // Test WithLabels if client.Options().Protocol == 2 { Expect(result["a"][0]).To(BeEquivalentTo([]interface{}{})) } else { Expect(result["a"][0]).To(BeEquivalentTo(map[interface{}]interface{}{})) } mrangeOpt = &redis.TSMRangeOptions{WithLabels: true} result, err = client.TSMRangeWithArgs(ctx, 0, 200, []string{"Test=This"}, mrangeOpt).Result() Expect(err).NotTo(HaveOccurred()) if client.Options().Protocol == 2 { Expect(result["a"][0]).To(ConsistOf([]interface{}{[]interface{}{"Test", "This"}, []interface{}{"team", "ny"}})) } else { Expect(result["a"][0]).To(BeEquivalentTo(map[interface{}]interface{}{"Test": "This", "team": "ny"})) } // Test SelectedLabels mrangeOpt = &redis.TSMRangeOptions{SelectedLabels: []interface{}{"team"}} result, err = client.TSMRangeWithArgs(ctx, 0, 200, []string{"Test=This"}, 
mrangeOpt).Result() Expect(err).NotTo(HaveOccurred()) if client.Options().Protocol == 2 { Expect(result["a"][0].([]interface{})[0]).To(BeEquivalentTo([]interface{}{"team", "ny"})) Expect(result["b"][0].([]interface{})[0]).To(BeEquivalentTo([]interface{}{"team", "sf"})) } else { Expect(result["a"][0]).To(BeEquivalentTo(map[interface{}]interface{}{"team": "ny"})) Expect(result["b"][0]).To(BeEquivalentTo(map[interface{}]interface{}{"team": "sf"})) } // Test FilterBy fts := make([]int, 0) for i := 10; i < 20; i++ { fts = append(fts, i) } mrangeOpt = &redis.TSMRangeOptions{FilterByTS: fts, FilterByValue: []int{1, 2}} result, err = client.TSMRangeWithArgs(ctx, 0, 200, []string{"Test=This"}, mrangeOpt).Result() Expect(err).NotTo(HaveOccurred()) if client.Options().Protocol == 2 { Expect(result["a"][1].([]interface{})).To(BeEquivalentTo([]interface{}{[]interface{}{int64(15), "1"}, []interface{}{int64(16), "2"}})) } else { Expect(result["a"][2]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(15), 1.0}, []interface{}{int64(16), 2.0}})) } // Test GroupBy mrangeOpt = &redis.TSMRangeOptions{GroupByLabel: "Test", Reducer: "sum"} result, err = client.TSMRangeWithArgs(ctx, 0, 3, []string{"Test=This"}, mrangeOpt).Result() Expect(err).NotTo(HaveOccurred()) if client.Options().Protocol == 2 { Expect(result["Test=This"][1]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(0), "0"}, []interface{}{int64(1), "2"}, []interface{}{int64(2), "4"}, []interface{}{int64(3), "6"}})) } else { Expect(result["Test=This"][3]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(0), 0.0}, []interface{}{int64(1), 2.0}, []interface{}{int64(2), 4.0}, []interface{}{int64(3), 6.0}})) } mrangeOpt = &redis.TSMRangeOptions{GroupByLabel: "Test", Reducer: "max"} result, err = client.TSMRangeWithArgs(ctx, 0, 3, []string{"Test=This"}, mrangeOpt).Result() Expect(err).NotTo(HaveOccurred()) if client.Options().Protocol == 2 { Expect(result["Test=This"][1]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(0), "0"}, []interface{}{int64(1), "1"}, []interface{}{int64(2), "2"}, []interface{}{int64(3), "3"}})) } else { Expect(result["Test=This"][3]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(0), 0.0}, []interface{}{int64(1), 1.0}, []interface{}{int64(2), 2.0}, []interface{}{int64(3), 3.0}})) } mrangeOpt = &redis.TSMRangeOptions{GroupByLabel: "team", Reducer: "min"} result, err = client.TSMRangeWithArgs(ctx, 0, 3, []string{"Test=This"}, mrangeOpt).Result() Expect(err).NotTo(HaveOccurred()) Expect(len(result)).To(BeEquivalentTo(2)) if client.Options().Protocol == 2 { Expect(result["team=ny"][1]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(0), "0"}, []interface{}{int64(1), "1"}, []interface{}{int64(2), "2"}, []interface{}{int64(3), "3"}})) Expect(result["team=sf"][1]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(0), "0"}, []interface{}{int64(1), "1"}, []interface{}{int64(2), "2"}, []interface{}{int64(3), "3"}})) } else { Expect(result["team=ny"][3]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(0), 0.0}, []interface{}{int64(1), 1.0}, []interface{}{int64(2), 2.0}, []interface{}{int64(3), 3.0}})) Expect(result["team=sf"][3]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(0), 0.0}, []interface{}{int64(1), 1.0}, []interface{}{int64(2), 2.0}, []interface{}{int64(3), 3.0}})) } // Test Align mrangeOpt = &redis.TSMRangeOptions{Aggregator: redis.Count, BucketDuration: 10, Align: "-"} result, err = client.TSMRangeWithArgs(ctx, 0, 10, []string{"team=ny"}, mrangeOpt).Result() Expect(err).NotTo(HaveOccurred()) if 
client.Options().Protocol == 2 { Expect(result["a"][1]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(0), "10"}, []interface{}{int64(10), "1"}})) } else { Expect(result["a"][2]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(0), 10.0}, []interface{}{int64(10), 1.0}})) } mrangeOpt = &redis.TSMRangeOptions{Aggregator: redis.Count, BucketDuration: 10, Align: 5} result, err = client.TSMRangeWithArgs(ctx, 0, 10, []string{"team=ny"}, mrangeOpt).Result() Expect(err).NotTo(HaveOccurred()) if client.Options().Protocol == 2 { Expect(result["a"][1]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(0), "5"}, []interface{}{int64(5), "6"}})) } else { Expect(result["a"][2]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(0), 5.0}, []interface{}{int64(5), 6.0}})) } }) It("should TSMRangeWithArgs Latest", Label("timeseries", "tsmrangeWithArgs", "tsmrangelatest", "NonRedisEnterprise"), func() { resultCreate, err := client.TSCreate(ctx, "a").Result() Expect(err).NotTo(HaveOccurred()) Expect(resultCreate).To(BeEquivalentTo("OK")) opt := &redis.TSOptions{Labels: map[string]string{"is_compaction": "true"}} resultCreate, err = client.TSCreateWithArgs(ctx, "b", opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultCreate).To(BeEquivalentTo("OK")) resultCreate, err = client.TSCreate(ctx, "c").Result() Expect(err).NotTo(HaveOccurred()) Expect(resultCreate).To(BeEquivalentTo("OK")) opt = &redis.TSOptions{Labels: map[string]string{"is_compaction": "true"}} resultCreate, err = client.TSCreateWithArgs(ctx, "d", opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultCreate).To(BeEquivalentTo("OK")) resultCreateRule, err := client.TSCreateRule(ctx, "a", "b", redis.Sum, 10).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultCreateRule).To(BeEquivalentTo("OK")) resultCreateRule, err = client.TSCreateRule(ctx, "c", "d", redis.Sum, 10).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultCreateRule).To(BeEquivalentTo("OK")) _, err = client.TSAdd(ctx, "a", 1, 1).Result() Expect(err).NotTo(HaveOccurred()) _, err = client.TSAdd(ctx, "a", 2, 3).Result() Expect(err).NotTo(HaveOccurred()) _, err = client.TSAdd(ctx, "a", 11, 7).Result() Expect(err).NotTo(HaveOccurred()) _, err = client.TSAdd(ctx, "a", 13, 1).Result() Expect(err).NotTo(HaveOccurred()) _, err = client.TSAdd(ctx, "c", 1, 1).Result() Expect(err).NotTo(HaveOccurred()) _, err = client.TSAdd(ctx, "c", 2, 3).Result() Expect(err).NotTo(HaveOccurred()) _, err = client.TSAdd(ctx, "c", 11, 7).Result() Expect(err).NotTo(HaveOccurred()) _, err = client.TSAdd(ctx, "c", 13, 1).Result() Expect(err).NotTo(HaveOccurred()) mrangeOpt := &redis.TSMRangeOptions{Latest: true} result, err := client.TSMRangeWithArgs(ctx, 0, 10, []string{"is_compaction=true"}, mrangeOpt).Result() Expect(err).NotTo(HaveOccurred()) if client.Options().Protocol == 2 { Expect(result["b"][1]).To(ConsistOf([]interface{}{int64(0), "4"}, []interface{}{int64(10), "8"})) Expect(result["d"][1]).To(ConsistOf([]interface{}{int64(0), "4"}, []interface{}{int64(10), "8"})) } else { Expect(result["b"][2]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(0), 4.0}, []interface{}{int64(10), 8.0}})) Expect(result["d"][2]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(0), 4.0}, []interface{}{int64(10), 8.0}})) } }) It("should TSMRevRange and TSMRevRangeWithArgs", Label("timeseries", "tsmrevrange", "tsmrevrangeWithArgs"), func() { createOpt := &redis.TSOptions{Labels: map[string]string{"Test": "This", "team": "ny"}} resultCreate, err := client.TSCreateWithArgs(ctx, "a", 
createOpt).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultCreate).To(BeEquivalentTo("OK")) createOpt = &redis.TSOptions{Labels: map[string]string{"Test": "This", "Taste": "That", "team": "sf"}} resultCreate, err = client.TSCreateWithArgs(ctx, "b", createOpt).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultCreate).To(BeEquivalentTo("OK")) for i := 0; i < 100; i++ { _, err := client.TSAdd(ctx, "a", i, float64(i%7)).Result() Expect(err).NotTo(HaveOccurred()) _, err = client.TSAdd(ctx, "b", i, float64(i%11)).Result() Expect(err).NotTo(HaveOccurred()) } result, err := client.TSMRevRange(ctx, 0, 200, []string{"Test=This"}).Result() Expect(err).NotTo(HaveOccurred()) Expect(len(result)).To(BeEquivalentTo(2)) if client.Options().Protocol == 2 { Expect(len(result["a"][1].([]interface{}))).To(BeEquivalentTo(100)) } else { Expect(len(result["a"][2].([]interface{}))).To(BeEquivalentTo(100)) } // Test Count mrangeOpt := &redis.TSMRevRangeOptions{Count: 10} result, err = client.TSMRevRangeWithArgs(ctx, 0, 200, []string{"Test=This"}, mrangeOpt).Result() Expect(err).NotTo(HaveOccurred()) if client.Options().Protocol == 2 { Expect(len(result["a"][1].([]interface{}))).To(BeEquivalentTo(10)) } else { Expect(len(result["a"][2].([]interface{}))).To(BeEquivalentTo(10)) } // Test Aggregation and BucketDuration for i := 0; i < 100; i++ { _, err := client.TSAdd(ctx, "a", i+200, float64(i%7)).Result() Expect(err).NotTo(HaveOccurred()) } mrangeOpt = &redis.TSMRevRangeOptions{Aggregator: redis.Avg, BucketDuration: 10} result, err = client.TSMRevRangeWithArgs(ctx, 0, 500, []string{"Test=This"}, mrangeOpt).Result() Expect(err).NotTo(HaveOccurred()) Expect(len(result)).To(BeEquivalentTo(2)) if client.Options().Protocol == 2 { Expect(len(result["a"][1].([]interface{}))).To(BeEquivalentTo(20)) Expect(result["a"][0]).To(BeEquivalentTo([]interface{}{})) } else { Expect(len(result["a"][2].([]interface{}))).To(BeEquivalentTo(20)) Expect(result["a"][0]).To(BeEquivalentTo(map[interface{}]interface{}{})) } mrangeOpt = &redis.TSMRevRangeOptions{WithLabels: true} result, err = client.TSMRevRangeWithArgs(ctx, 0, 200, []string{"Test=This"}, mrangeOpt).Result() Expect(err).NotTo(HaveOccurred()) if client.Options().Protocol == 2 { Expect(result["a"][0]).To(ConsistOf([]interface{}{[]interface{}{"Test", "This"}, []interface{}{"team", "ny"}})) } else { Expect(result["a"][0]).To(BeEquivalentTo(map[interface{}]interface{}{"Test": "This", "team": "ny"})) } // Test SelectedLabels mrangeOpt = &redis.TSMRevRangeOptions{SelectedLabels: []interface{}{"team"}} result, err = client.TSMRevRangeWithArgs(ctx, 0, 200, []string{"Test=This"}, mrangeOpt).Result() Expect(err).NotTo(HaveOccurred()) if client.Options().Protocol == 2 { Expect(result["a"][0].([]interface{})[0]).To(BeEquivalentTo([]interface{}{"team", "ny"})) Expect(result["b"][0].([]interface{})[0]).To(BeEquivalentTo([]interface{}{"team", "sf"})) } else { Expect(result["a"][0]).To(BeEquivalentTo(map[interface{}]interface{}{"team": "ny"})) Expect(result["b"][0]).To(BeEquivalentTo(map[interface{}]interface{}{"team": "sf"})) } // Test FilterBy fts := make([]int, 0) for i := 10; i < 20; i++ { fts = append(fts, i) } mrangeOpt = &redis.TSMRevRangeOptions{FilterByTS: fts, FilterByValue: []int{1, 2}} result, err = client.TSMRevRangeWithArgs(ctx, 0, 200, []string{"Test=This"}, mrangeOpt).Result() Expect(err).NotTo(HaveOccurred()) if client.Options().Protocol == 2 { Expect(result["a"][1].([]interface{})).To(ConsistOf([]interface{}{int64(16), "2"}, []interface{}{int64(15), "1"})) } else 
{ Expect(result["a"][2]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(16), 2.0}, []interface{}{int64(15), 1.0}})) } // Test GroupBy mrangeOpt = &redis.TSMRevRangeOptions{GroupByLabel: "Test", Reducer: "sum"} result, err = client.TSMRevRangeWithArgs(ctx, 0, 3, []string{"Test=This"}, mrangeOpt).Result() Expect(err).NotTo(HaveOccurred()) if client.Options().Protocol == 2 { Expect(result["Test=This"][1]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(3), "6"}, []interface{}{int64(2), "4"}, []interface{}{int64(1), "2"}, []interface{}{int64(0), "0"}})) } else { Expect(result["Test=This"][3]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(3), 6.0}, []interface{}{int64(2), 4.0}, []interface{}{int64(1), 2.0}, []interface{}{int64(0), 0.0}})) } mrangeOpt = &redis.TSMRevRangeOptions{GroupByLabel: "Test", Reducer: "max"} result, err = client.TSMRevRangeWithArgs(ctx, 0, 3, []string{"Test=This"}, mrangeOpt).Result() Expect(err).NotTo(HaveOccurred()) if client.Options().Protocol == 2 { Expect(result["Test=This"][1]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(3), "3"}, []interface{}{int64(2), "2"}, []interface{}{int64(1), "1"}, []interface{}{int64(0), "0"}})) } else { Expect(result["Test=This"][3]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(3), 3.0}, []interface{}{int64(2), 2.0}, []interface{}{int64(1), 1.0}, []interface{}{int64(0), 0.0}})) } mrangeOpt = &redis.TSMRevRangeOptions{GroupByLabel: "team", Reducer: "min"} result, err = client.TSMRevRangeWithArgs(ctx, 0, 3, []string{"Test=This"}, mrangeOpt).Result() Expect(err).NotTo(HaveOccurred()) Expect(len(result)).To(BeEquivalentTo(2)) if client.Options().Protocol == 2 { Expect(result["team=ny"][1]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(3), "3"}, []interface{}{int64(2), "2"}, []interface{}{int64(1), "1"}, []interface{}{int64(0), "0"}})) Expect(result["team=sf"][1]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(3), "3"}, []interface{}{int64(2), "2"}, []interface{}{int64(1), "1"}, []interface{}{int64(0), "0"}})) } else { Expect(result["team=ny"][3]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(3), 3.0}, []interface{}{int64(2), 2.0}, []interface{}{int64(1), 1.0}, []interface{}{int64(0), 0.0}})) Expect(result["team=sf"][3]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(3), 3.0}, []interface{}{int64(2), 2.0}, []interface{}{int64(1), 1.0}, []interface{}{int64(0), 0.0}})) } // Test Align mrangeOpt = &redis.TSMRevRangeOptions{Aggregator: redis.Count, BucketDuration: 10, Align: "-"} result, err = client.TSMRevRangeWithArgs(ctx, 0, 10, []string{"team=ny"}, mrangeOpt).Result() Expect(err).NotTo(HaveOccurred()) if client.Options().Protocol == 2 { Expect(result["a"][1]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(10), "1"}, []interface{}{int64(0), "10"}})) } else { Expect(result["a"][2]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(10), 1.0}, []interface{}{int64(0), 10.0}})) } mrangeOpt = &redis.TSMRevRangeOptions{Aggregator: redis.Count, BucketDuration: 10, Align: 1} result, err = client.TSMRevRangeWithArgs(ctx, 0, 10, []string{"team=ny"}, mrangeOpt).Result() Expect(err).NotTo(HaveOccurred()) if client.Options().Protocol == 2 { Expect(result["a"][1]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(1), "10"}, []interface{}{int64(0), "1"}})) } else { Expect(result["a"][2]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(1), 10.0}, []interface{}{int64(0), 1.0}})) } }) It("should TSMRevRangeWithArgs Latest", Label("timeseries", "tsmrevrangeWithArgs", "tsmrevrangelatest", 
"NonRedisEnterprise"), func() { resultCreate, err := client.TSCreate(ctx, "a").Result() Expect(err).NotTo(HaveOccurred()) Expect(resultCreate).To(BeEquivalentTo("OK")) opt := &redis.TSOptions{Labels: map[string]string{"is_compaction": "true"}} resultCreate, err = client.TSCreateWithArgs(ctx, "b", opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultCreate).To(BeEquivalentTo("OK")) resultCreate, err = client.TSCreate(ctx, "c").Result() Expect(err).NotTo(HaveOccurred()) Expect(resultCreate).To(BeEquivalentTo("OK")) opt = &redis.TSOptions{Labels: map[string]string{"is_compaction": "true"}} resultCreate, err = client.TSCreateWithArgs(ctx, "d", opt).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultCreate).To(BeEquivalentTo("OK")) resultCreateRule, err := client.TSCreateRule(ctx, "a", "b", redis.Sum, 10).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultCreateRule).To(BeEquivalentTo("OK")) resultCreateRule, err = client.TSCreateRule(ctx, "c", "d", redis.Sum, 10).Result() Expect(err).NotTo(HaveOccurred()) Expect(resultCreateRule).To(BeEquivalentTo("OK")) _, err = client.TSAdd(ctx, "a", 1, 1).Result() Expect(err).NotTo(HaveOccurred()) _, err = client.TSAdd(ctx, "a", 2, 3).Result() Expect(err).NotTo(HaveOccurred()) _, err = client.TSAdd(ctx, "a", 11, 7).Result() Expect(err).NotTo(HaveOccurred()) _, err = client.TSAdd(ctx, "a", 13, 1).Result() Expect(err).NotTo(HaveOccurred()) _, err = client.TSAdd(ctx, "c", 1, 1).Result() Expect(err).NotTo(HaveOccurred()) _, err = client.TSAdd(ctx, "c", 2, 3).Result() Expect(err).NotTo(HaveOccurred()) _, err = client.TSAdd(ctx, "c", 11, 7).Result() Expect(err).NotTo(HaveOccurred()) _, err = client.TSAdd(ctx, "c", 13, 1).Result() Expect(err).NotTo(HaveOccurred()) mrangeOpt := &redis.TSMRevRangeOptions{Latest: true} result, err := client.TSMRevRangeWithArgs(ctx, 0, 10, []string{"is_compaction=true"}, mrangeOpt).Result() Expect(err).NotTo(HaveOccurred()) if client.Options().Protocol == 2 { Expect(result["b"][1]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(10), "8"}, []interface{}{int64(0), "4"}})) Expect(result["d"][1]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(10), "8"}, []interface{}{int64(0), "4"}})) } else { Expect(result["b"][2]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(10), 8.0}, []interface{}{int64(0), 4.0}})) Expect(result["d"][2]).To(BeEquivalentTo([]interface{}{[]interface{}{int64(10), 8.0}, []interface{}{int64(0), 4.0}})) } }) }) } }) dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.10.0/osscluster_test.go0000644000000000000000000013067415024302467025524 0ustar rootrootpackage redis_test import ( "context" "crypto/tls" "errors" "fmt" "net" "slices" "strconv" "strings" "sync" "time" . "github.com/bsm/ginkgo/v2" . 
"github.com/bsm/gomega" "github.com/redis/go-redis/v9" "github.com/redis/go-redis/v9/internal/hashtag" ) type clusterScenario struct { ports []string nodeIDs []string clients map[string]*redis.Client } func (s *clusterScenario) slots() []int { return []int{0, 5461, 10923, 16384} } func (s *clusterScenario) masters() []*redis.Client { result := make([]*redis.Client, 3) for pos, port := range s.ports[:3] { result[pos] = s.clients[port] } return result } func (s *clusterScenario) slaves() []*redis.Client { result := make([]*redis.Client, 3) for pos, port := range s.ports[3:] { result[pos] = s.clients[port] } return result } func (s *clusterScenario) addrs() []string { addrs := make([]string, len(s.ports)) for i, port := range s.ports { addrs[i] = net.JoinHostPort("127.0.0.1", port) } return addrs } func (s *clusterScenario) newClusterClientUnstable(opt *redis.ClusterOptions) *redis.ClusterClient { opt.Addrs = s.addrs() return redis.NewClusterClient(opt) } func (s *clusterScenario) newClusterClient( ctx context.Context, opt *redis.ClusterOptions, ) *redis.ClusterClient { client := s.newClusterClientUnstable(opt) err := eventually(func() error { if opt.ClusterSlots != nil { return nil } state, err := client.LoadState(ctx) if err != nil { return err } if !state.IsConsistent(ctx) { return fmt.Errorf("cluster state is not consistent") } return nil }, 30*time.Second) if err != nil { panic(err) } return client } func (s *clusterScenario) Close() error { ctx := context.TODO() for _, master := range s.masters() { if master == nil { continue } err := master.FlushAll(ctx).Err() if err != nil { return err } // since 7.2 forget calls should be propagated, calling only master // nodes should be sufficient. for _, nID := range s.nodeIDs { master.ClusterForget(ctx, nID) } } return nil } func configureClusterTopology(ctx context.Context, scenario *clusterScenario) error { allowErrs := []string{ "ERR Slot 0 is already busy", "ERR Slot 5461 is already busy", "ERR Slot 10923 is already busy", "ERR Slot 16384 is already busy", } err := collectNodeInformation(ctx, scenario) if err != nil { return err } // Meet cluster nodes. for _, client := range scenario.clients { err := client.ClusterMeet(ctx, "127.0.0.1", scenario.ports[0]).Err() if err != nil { return err } } slots := scenario.slots() for pos, master := range scenario.masters() { err := master.ClusterAddSlotsRange(ctx, slots[pos], slots[pos+1]-1).Err() if err != nil && slices.Contains(allowErrs, err.Error()) == false { return err } } // Bootstrap slaves. for idx, slave := range scenario.slaves() { masterID := scenario.nodeIDs[idx] // Wait until master is available err := eventually(func() error { s := slave.ClusterNodes(ctx).Val() wanted := masterID if !strings.Contains(s, wanted) { return fmt.Errorf("%q does not contain %q", s, wanted) } return nil }, 10*time.Second) if err != nil { return err } err = slave.ClusterReplicate(ctx, masterID).Err() if err != nil { return err } } // Wait until all nodes have consistent info. 
wanted := []redis.ClusterSlot{{ Start: 0, End: 5460, Nodes: []redis.ClusterNode{{ ID: "", Addr: "127.0.0.1:16600", }, { ID: "", Addr: "127.0.0.1:16603", }}, }, { Start: 5461, End: 10922, Nodes: []redis.ClusterNode{{ ID: "", Addr: "127.0.0.1:16601", }, { ID: "", Addr: "127.0.0.1:16604", }}, }, { Start: 10923, End: 16383, Nodes: []redis.ClusterNode{{ ID: "", Addr: "127.0.0.1:16602", }, { ID: "", Addr: "127.0.0.1:16605", }}, }} for _, client := range scenario.clients { err := eventually(func() error { res, err := client.ClusterSlots(ctx).Result() if err != nil { return err } return assertSlotsEqual(res, wanted) }, 90*time.Second) if err != nil { return err } } return nil } func collectNodeInformation(ctx context.Context, scenario *clusterScenario) error { for pos, port := range scenario.ports { client := redis.NewClient(&redis.Options{ Addr: ":" + port, }) myID, err := client.ClusterMyID(ctx).Result() if err != nil { return err } scenario.clients[port] = client scenario.nodeIDs[pos] = myID } return nil } func assertSlotsEqual(slots, wanted []redis.ClusterSlot) error { outerLoop: for _, s2 := range wanted { for _, s1 := range slots { if slotEqual(s1, s2) { continue outerLoop } } return fmt.Errorf("%v not found in %v", s2, slots) } return nil } func slotEqual(s1, s2 redis.ClusterSlot) bool { if s1.Start != s2.Start { return false } if s1.End != s2.End { return false } if len(s1.Nodes) != len(s2.Nodes) { return false } for i, n1 := range s1.Nodes { if n1.Addr != s2.Nodes[i].Addr { return false } } return true } //------------------------------------------------------------------------------ var _ = Describe("ClusterClient", func() { var failover bool var opt *redis.ClusterOptions var client *redis.ClusterClient assertClusterClient := func() { It("should GET/SET/DEL", func() { err := client.Get(ctx, "A").Err() Expect(err).To(Equal(redis.Nil)) err = client.Set(ctx, "A", "VALUE", 0).Err() Expect(err).NotTo(HaveOccurred()) Eventually(func() string { return client.Get(ctx, "A").Val() }, 30*time.Second).Should(Equal("VALUE")) cnt, err := client.Del(ctx, "A").Result() Expect(err).NotTo(HaveOccurred()) Expect(cnt).To(Equal(int64(1))) }) It("GET follows redirects", func() { err := client.Set(ctx, "A", "VALUE", 0).Err() Expect(err).NotTo(HaveOccurred()) if !failover { Eventually(func() int64 { nodes, err := client.Nodes(ctx, "A") if err != nil { return 0 } return nodes[1].Client.DBSize(ctx).Val() }, 30*time.Second).Should(Equal(int64(1))) Eventually(func() error { return client.SwapNodes(ctx, "A") }, 30*time.Second).ShouldNot(HaveOccurred()) } v, err := client.Get(ctx, "A").Result() Expect(err).NotTo(HaveOccurred()) Expect(v).To(Equal("VALUE")) }) It("SET follows redirects", func() { if !failover { Eventually(func() error { return client.SwapNodes(ctx, "A") }, 30*time.Second).ShouldNot(HaveOccurred()) } err := client.Set(ctx, "A", "VALUE", 0).Err() Expect(err).NotTo(HaveOccurred()) v, err := client.Get(ctx, "A").Result() Expect(err).NotTo(HaveOccurred()) Expect(v).To(Equal("VALUE")) }) It("distributes keys", func() { for i := 0; i < 100; i++ { err := client.Set(ctx, fmt.Sprintf("key%d", i), "value", 0).Err() Expect(err).NotTo(HaveOccurred()) } err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error { defer GinkgoRecover() Eventually(func() string { return master.Info(ctx, "keyspace").Val() }, 30*time.Second).Should(Or( ContainSubstring("keys=32"), ContainSubstring("keys=36"), ContainSubstring("keys=32"), )) return nil }) Expect(err).NotTo(HaveOccurred()) }) It("distributes 
keys when using EVAL", func() { script := redis.NewScript(` local r = redis.call('SET', KEYS[1], ARGV[1]) return r `) var key string for i := 0; i < 100; i++ { key = fmt.Sprintf("key%d", i) err := script.Run(ctx, client, []string{key}, "value").Err() Expect(err).NotTo(HaveOccurred()) } err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error { defer GinkgoRecover() Eventually(func() string { return master.Info(ctx, "keyspace").Val() }, 30*time.Second).Should(Or( ContainSubstring("keys=32"), ContainSubstring("keys=36"), ContainSubstring("keys=32"), )) return nil }) Expect(err).NotTo(HaveOccurred()) }) It("distributes scripts when using Script Load", func() { client.ScriptFlush(ctx) script := redis.NewScript(`return 'Unique script'`) script.Load(ctx, client) err := client.ForEachShard(ctx, func(ctx context.Context, shard *redis.Client) error { defer GinkgoRecover() val, _ := script.Exists(ctx, shard).Result() Expect(val[0]).To(Equal(true)) return nil }) Expect(err).NotTo(HaveOccurred()) }) It("checks all shards when using Script Exists", func() { client.ScriptFlush(ctx) script := redis.NewScript(`return 'First script'`) lostScriptSrc := `return 'Lost script'` lostScript := redis.NewScript(lostScriptSrc) script.Load(ctx, client) client.Do(ctx, "script", "load", lostScriptSrc) val, _ := client.ScriptExists(ctx, script.Hash(), lostScript.Hash()).Result() Expect(val).To(Equal([]bool{true, false})) }) It("flushes scripts from all shards when using ScriptFlush", func() { script := redis.NewScript(`return 'Unnecessary script'`) script.Load(ctx, client) val, _ := client.ScriptExists(ctx, script.Hash()).Result() Expect(val).To(Equal([]bool{true})) client.ScriptFlush(ctx) val, _ = client.ScriptExists(ctx, script.Hash()).Result() Expect(val).To(Equal([]bool{false})) }) It("supports Watch", func() { var incr func(string) error // Transactionally increments key using GET and SET commands. 
incr = func(key string) error { err := client.Watch(ctx, func(tx *redis.Tx) error { n, err := tx.Get(ctx, key).Int64() if err != nil && err != redis.Nil { return err } _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { pipe.Set(ctx, key, strconv.FormatInt(n+1, 10), 0) return nil }) return err }, key) if err == redis.TxFailedErr { return incr(key) } return err } var wg sync.WaitGroup for i := 0; i < 100; i++ { wg.Add(1) go func() { defer GinkgoRecover() defer wg.Done() err := incr("key") Expect(err).NotTo(HaveOccurred()) }() } wg.Wait() Eventually(func() string { return client.Get(ctx, "key").Val() }, 30*time.Second).Should(Equal("100")) }) Describe("pipelining", func() { var pipe *redis.Pipeline assertPipeline := func() { keys := []string{"A", "B", "C", "D", "E", "F", "G"} It("follows redirects", func() { if !failover { for _, key := range keys { Eventually(func() error { return client.SwapNodes(ctx, key) }, 30*time.Second).ShouldNot(HaveOccurred()) } } for i, key := range keys { pipe.Set(ctx, key, key+"_value", 0) pipe.Expire(ctx, key, time.Duration(i+1)*time.Hour) } cmds, err := pipe.Exec(ctx) Expect(err).NotTo(HaveOccurred()) Expect(cmds).To(HaveLen(14)) _ = client.ForEachShard(ctx, func(ctx context.Context, node *redis.Client) error { defer GinkgoRecover() Eventually(func() int64 { return node.DBSize(ctx).Val() }, 30*time.Second).ShouldNot(BeZero()) return nil }) if !failover { for _, key := range keys { Eventually(func() error { return client.SwapNodes(ctx, key) }, 30*time.Second).ShouldNot(HaveOccurred()) } } for _, key := range keys { pipe.Get(ctx, key) pipe.TTL(ctx, key) } cmds, err = pipe.Exec(ctx) Expect(err).NotTo(HaveOccurred()) Expect(cmds).To(HaveLen(14)) for i, key := range keys { get := cmds[i*2].(*redis.StringCmd) Expect(get.Val()).To(Equal(key + "_value")) ttl := cmds[(i*2)+1].(*redis.DurationCmd) dur := time.Duration(i+1) * time.Hour Expect(ttl.Val()).To(BeNumerically("~", dur, 30*time.Second)) } }) It("works with missing keys", func() { pipe.Set(ctx, "A", "A_value", 0) pipe.Set(ctx, "C", "C_value", 0) _, err := pipe.Exec(ctx) Expect(err).NotTo(HaveOccurred()) a := pipe.Get(ctx, "A") b := pipe.Get(ctx, "B") c := pipe.Get(ctx, "C") cmds, err := pipe.Exec(ctx) Expect(err).To(Equal(redis.Nil)) Expect(cmds).To(HaveLen(3)) Expect(a.Err()).NotTo(HaveOccurred()) Expect(a.Val()).To(Equal("A_value")) Expect(b.Err()).To(Equal(redis.Nil)) Expect(b.Val()).To(Equal("")) Expect(c.Err()).NotTo(HaveOccurred()) Expect(c.Val()).To(Equal("C_value")) }) } Describe("with Pipeline", func() { BeforeEach(func() { pipe = client.Pipeline().(*redis.Pipeline) }) AfterEach(func() {}) assertPipeline() It("doesn't fail node with context.Canceled error", func() { ctx, cancel := context.WithCancel(context.Background()) cancel() pipe.Set(ctx, "A", "A_value", 0) _, err := pipe.Exec(ctx) Expect(err).To(HaveOccurred()) Expect(errors.Is(err, context.Canceled)).To(BeTrue()) clientNodes, _ := client.Nodes(ctx, "A") for _, node := range clientNodes { Expect(node.Failing()).To(BeFalse()) } }) It("doesn't fail node with context.DeadlineExceeded error", func() { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) defer cancel() pipe.Set(ctx, "A", "A_value", 0) _, err := pipe.Exec(ctx) Expect(err).To(HaveOccurred()) Expect(errors.Is(err, context.DeadlineExceeded)).To(BeTrue()) clientNodes, _ := client.Nodes(ctx, "A") for _, node := range clientNodes { Expect(node.Failing()).To(BeFalse()) } }) }) Describe("with TxPipeline", func() { BeforeEach(func() { pipe = 
client.TxPipeline().(*redis.Pipeline) }) AfterEach(func() {}) assertPipeline() }) }) It("supports PubSub", func() { pubsub := client.Subscribe(ctx, "mychannel") defer pubsub.Close() Eventually(func() error { _, err := client.Publish(ctx, "mychannel", "hello").Result() if err != nil { return err } msg, err := pubsub.ReceiveTimeout(ctx, time.Second) if err != nil { return err } _, ok := msg.(*redis.Message) if !ok { return fmt.Errorf("got %T, wanted *redis.Message", msg) } return nil }, 30*time.Second).ShouldNot(HaveOccurred()) }) It("supports sharded PubSub", func() { pubsub := client.SSubscribe(ctx, "mychannel") defer pubsub.Close() Eventually(func() error { _, err := client.SPublish(ctx, "mychannel", "hello").Result() if err != nil { return err } msg, err := pubsub.ReceiveTimeout(ctx, time.Second) if err != nil { return err } _, ok := msg.(*redis.Message) if !ok { return fmt.Errorf("got %T, wanted *redis.Message", msg) } return nil }, 30*time.Second).ShouldNot(HaveOccurred()) }) It("supports PubSub.Ping without channels", func() { pubsub := client.Subscribe(ctx) defer pubsub.Close() err := pubsub.Ping(ctx) Expect(err).NotTo(HaveOccurred()) }) } Describe("ClusterClient PROTO 2", func() { BeforeEach(func() { opt = redisClusterOptions() opt.Protocol = 2 client = cluster.newClusterClient(ctx, opt) err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error { return master.FlushDB(ctx).Err() }) Expect(err).NotTo(HaveOccurred()) }) AfterEach(func() { _ = client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error { return master.FlushDB(ctx).Err() }) Expect(client.Close()).NotTo(HaveOccurred()) }) It("should CLUSTER PROTO 2", func() { _ = client.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error { val, err := c.Do(ctx, "HELLO").Result() Expect(err).NotTo(HaveOccurred()) Expect(val).Should(ContainElements("proto", int64(2))) return nil }) }) }) Describe("ClusterClient", func() { BeforeEach(func() { opt = redisClusterOptions() opt.ClientName = "cluster_hi" client = cluster.newClusterClient(ctx, opt) err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error { return master.FlushDB(ctx).Err() }) Expect(err).NotTo(HaveOccurred()) }) AfterEach(func() { _ = client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error { return master.FlushDB(ctx).Err() }) Expect(client.Close()).NotTo(HaveOccurred()) }) It("returns pool stats", func() { stats := client.PoolStats() Expect(stats).To(BeAssignableToTypeOf(&redis.PoolStats{})) }) It("returns an error when there are no attempts left", func() { opt := redisClusterOptions() opt.MaxRedirects = -1 client := cluster.newClusterClient(ctx, opt) Eventually(func() error { return client.SwapNodes(ctx, "A") }, 30*time.Second).ShouldNot(HaveOccurred()) err := client.Get(ctx, "A").Err() Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("MOVED")) Expect(client.Close()).NotTo(HaveOccurred()) }) It("determines hash slots correctly for generic commands", func() { opt := redisClusterOptions() opt.MaxRedirects = -1 client := cluster.newClusterClient(ctx, opt) err := client.Do(ctx, "GET", "A").Err() Expect(err).To(Equal(redis.Nil)) err = client.Do(ctx, []byte("GET"), []byte("A")).Err() Expect(err).To(Equal(redis.Nil)) Eventually(func() error { return client.SwapNodes(ctx, "A") }, 30*time.Second).ShouldNot(HaveOccurred()) err = client.Do(ctx, "GET", "A").Err() Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("MOVED")) err = 
client.Do(ctx, []byte("GET"), []byte("A")).Err() Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("MOVED")) Expect(client.Close()).NotTo(HaveOccurred()) }) It("follows node redirection immediately", func() { // Configure retry backoffs far in excess of the expected duration of redirection opt := redisClusterOptions() opt.MinRetryBackoff = 10 * time.Minute opt.MaxRetryBackoff = 20 * time.Minute client := cluster.newClusterClient(ctx, opt) Eventually(func() error { return client.SwapNodes(ctx, "A") }, 30*time.Second).ShouldNot(HaveOccurred()) // Note that this context sets a deadline more aggressive than the lowest possible bound // of the retry backoff; this verifies that redirection completes immediately. redirCtx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() err := client.Set(redirCtx, "A", "VALUE", 0).Err() Expect(err).NotTo(HaveOccurred()) v, err := client.Get(redirCtx, "A").Result() Expect(err).NotTo(HaveOccurred()) Expect(v).To(Equal("VALUE")) Expect(client.Close()).NotTo(HaveOccurred()) }) It("calls fn for every master node", func() { for i := 0; i < 10; i++ { Expect(client.Set(ctx, strconv.Itoa(i), "", 0).Err()).NotTo(HaveOccurred()) } err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error { return master.FlushDB(ctx).Err() }) Expect(err).NotTo(HaveOccurred()) size, err := client.DBSize(ctx).Result() Expect(err).NotTo(HaveOccurred()) Expect(size).To(Equal(int64(0))) }) It("should CLUSTER SLOTS", func() { res, err := client.ClusterSlots(ctx).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(HaveLen(3)) wanted := []redis.ClusterSlot{{ Start: 0, End: 5460, Nodes: []redis.ClusterNode{{ ID: "", Addr: "127.0.0.1:16600", }, { ID: "", Addr: "127.0.0.1:16603", }}, }, { Start: 5461, End: 10922, Nodes: []redis.ClusterNode{{ ID: "", Addr: "127.0.0.1:16601", }, { ID: "", Addr: "127.0.0.1:16604", }}, }, { Start: 10923, End: 16383, Nodes: []redis.ClusterNode{{ ID: "", Addr: "127.0.0.1:16602", }, { ID: "", Addr: "127.0.0.1:16605", }}, }} Expect(assertSlotsEqual(res, wanted)).NotTo(HaveOccurred()) }) It("should CLUSTER SHARDS", func() { res, err := client.ClusterShards(ctx).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).NotTo(BeEmpty()) // Iterate over the ClusterShard results and validate the fields. for _, shard := range res { Expect(shard.Slots).NotTo(BeEmpty()) for _, slotRange := range shard.Slots { Expect(slotRange.Start).To(BeNumerically(">=", 0)) Expect(slotRange.End).To(BeNumerically(">=", slotRange.Start)) } Expect(shard.Nodes).NotTo(BeEmpty()) for _, node := range shard.Nodes { Expect(node.ID).NotTo(BeEmpty()) Expect(node.Endpoint).NotTo(BeEmpty()) Expect(node.IP).NotTo(BeEmpty()) Expect(node.Port).To(BeNumerically(">", 0)) validRoles := []string{"master", "slave", "replica"} Expect(validRoles).To(ContainElement(node.Role)) Expect(node.ReplicationOffset).To(BeNumerically(">=", 0)) validHealthStatuses := []string{"online", "failed", "loading"} Expect(validHealthStatuses).To(ContainElement(node.Health)) } } }) It("should CLUSTER LINKS", func() { res, err := client.ClusterLinks(ctx).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).NotTo(BeEmpty()) // Iterate over the ClusterLink results and validate the map keys. 
for _, link := range res { Expect(link.Direction).NotTo(BeEmpty()) Expect([]string{"from", "to"}).To(ContainElement(link.Direction)) Expect(link.Node).NotTo(BeEmpty()) Expect(link.CreateTime).To(BeNumerically(">", 0)) Expect(link.Events).NotTo(BeEmpty()) validEventChars := []rune{'r', 'w'} for _, eventChar := range link.Events { Expect(validEventChars).To(ContainElement(eventChar)) } Expect(link.SendBufferAllocated).To(BeNumerically(">=", 0)) Expect(link.SendBufferUsed).To(BeNumerically(">=", 0)) } }) It("should cluster client setname", func() { err := client.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error { return c.Ping(ctx).Err() }) Expect(err).NotTo(HaveOccurred()) _ = client.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error { val, err := c.ClientList(ctx).Result() Expect(err).NotTo(HaveOccurred()) Expect(val).Should(ContainSubstring("name=cluster_hi")) return nil }) }) It("should CLUSTER PROTO 3", func() { _ = client.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error { val, err := c.Do(ctx, "HELLO").Result() Expect(err).NotTo(HaveOccurred()) Expect(val).Should(HaveKeyWithValue("proto", int64(3))) return nil }) }) It("should CLUSTER MYSHARDID", func() { shardID, err := client.ClusterMyShardID(ctx).Result() Expect(err).NotTo(HaveOccurred()) Expect(shardID).ToNot(BeEmpty()) }) It("should CLUSTER NODES", func() { res, err := client.ClusterNodes(ctx).Result() Expect(err).NotTo(HaveOccurred()) Expect(len(res)).To(BeNumerically(">", 400)) }) It("should CLUSTER INFO", func() { res, err := client.ClusterInfo(ctx).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(ContainSubstring("cluster_known_nodes:6")) }) It("should CLUSTER KEYSLOT", func() { hashSlot, err := client.ClusterKeySlot(ctx, "somekey").Result() Expect(err).NotTo(HaveOccurred()) Expect(hashSlot).To(Equal(int64(hashtag.Slot("somekey")))) }) It("should CLUSTER GETKEYSINSLOT", func() { keys, err := client.ClusterGetKeysInSlot(ctx, hashtag.Slot("somekey"), 1).Result() Expect(err).NotTo(HaveOccurred()) Expect(len(keys)).To(Equal(0)) }) It("should CLUSTER COUNT-FAILURE-REPORTS", func() { n, err := client.ClusterCountFailureReports(ctx, cluster.nodeIDs[0]).Result() Expect(err).NotTo(HaveOccurred()) Expect(n).To(Equal(int64(0))) }) It("should CLUSTER COUNTKEYSINSLOT", func() { n, err := client.ClusterCountKeysInSlot(ctx, 10).Result() Expect(err).NotTo(HaveOccurred()) Expect(n).To(Equal(int64(0))) }) It("should CLUSTER SAVECONFIG", func() { res, err := client.ClusterSaveConfig(ctx).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(Equal("OK")) }) It("should CLUSTER SLAVES", func() { nodesList, err := client.ClusterSlaves(ctx, cluster.nodeIDs[0]).Result() Expect(err).NotTo(HaveOccurred()) Expect(nodesList).Should(ContainElement(ContainSubstring("slave"))) Expect(nodesList).Should(HaveLen(1)) }) It("should RANDOMKEY", func() { const nkeys = 100 for i := 0; i < nkeys; i++ { err := client.Set(ctx, fmt.Sprintf("key%d", i), "value", 0).Err() Expect(err).NotTo(HaveOccurred()) } var keys []string addKey := func(key string) { for _, k := range keys { if k == key { return } } keys = append(keys, key) } for i := 0; i < nkeys*10; i++ { key := client.RandomKey(ctx).Val() addKey(key) } Expect(len(keys)).To(BeNumerically("~", nkeys, nkeys/10)) }) It("supports Process hook", func() { testCtx, cancel := context.WithCancel(ctx) defer cancel() err := client.Ping(ctx).Err() Expect(err).NotTo(HaveOccurred()) err = client.ForEachShard(ctx, func(ctx context.Context, node *redis.Client) error { 
return node.Ping(ctx).Err() }) Expect(err).NotTo(HaveOccurred()) var stack []string clusterHook := &hook{ processHook: func(hook redis.ProcessHook) redis.ProcessHook { return func(ctx context.Context, cmd redis.Cmder) error { select { case <-testCtx.Done(): return hook(ctx, cmd) default: } Expect(cmd.String()).To(Equal("ping: ")) stack = append(stack, "cluster.BeforeProcess") err := hook(ctx, cmd) Expect(cmd.String()).To(Equal("ping: PONG")) stack = append(stack, "cluster.AfterProcess") return err } }, } client.AddHook(clusterHook) nodeHook := &hook{ processHook: func(hook redis.ProcessHook) redis.ProcessHook { return func(ctx context.Context, cmd redis.Cmder) error { select { case <-testCtx.Done(): return hook(ctx, cmd) default: } Expect(cmd.String()).To(Equal("ping: ")) stack = append(stack, "shard.BeforeProcess") err := hook(ctx, cmd) Expect(cmd.String()).To(Equal("ping: PONG")) stack = append(stack, "shard.AfterProcess") return err } }, } _ = client.ForEachShard(ctx, func(ctx context.Context, node *redis.Client) error { node.AddHook(nodeHook) return nil }) err = client.Ping(ctx).Err() Expect(err).NotTo(HaveOccurred()) Expect(stack).To(Equal([]string{ "cluster.BeforeProcess", "shard.BeforeProcess", "shard.AfterProcess", "cluster.AfterProcess", })) }) It("supports Pipeline hook", func() { err := client.Ping(ctx).Err() Expect(err).NotTo(HaveOccurred()) err = client.ForEachShard(ctx, func(ctx context.Context, node *redis.Client) error { return node.Ping(ctx).Err() }) Expect(err).NotTo(HaveOccurred()) var stack []string client.AddHook(&hook{ processPipelineHook: func(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook { return func(ctx context.Context, cmds []redis.Cmder) error { Expect(cmds).To(HaveLen(1)) Expect(cmds[0].String()).To(Equal("ping: ")) stack = append(stack, "cluster.BeforeProcessPipeline") err := hook(ctx, cmds) Expect(cmds).To(HaveLen(1)) Expect(cmds[0].String()).To(Equal("ping: PONG")) stack = append(stack, "cluster.AfterProcessPipeline") return err } }, }) _ = client.ForEachShard(ctx, func(ctx context.Context, node *redis.Client) error { node.AddHook(&hook{ processPipelineHook: func(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook { return func(ctx context.Context, cmds []redis.Cmder) error { Expect(cmds).To(HaveLen(1)) Expect(cmds[0].String()).To(Equal("ping: ")) stack = append(stack, "shard.BeforeProcessPipeline") err := hook(ctx, cmds) Expect(cmds).To(HaveLen(1)) Expect(cmds[0].String()).To(Equal("ping: PONG")) stack = append(stack, "shard.AfterProcessPipeline") return err } }, }) return nil }) _, err = client.Pipelined(ctx, func(pipe redis.Pipeliner) error { pipe.Ping(ctx) return nil }) Expect(err).NotTo(HaveOccurred()) Expect(stack).To(Equal([]string{ "cluster.BeforeProcessPipeline", "shard.BeforeProcessPipeline", "shard.AfterProcessPipeline", "cluster.AfterProcessPipeline", })) }) It("supports TxPipeline hook", func() { err := client.Ping(ctx).Err() Expect(err).NotTo(HaveOccurred()) err = client.ForEachShard(ctx, func(ctx context.Context, node *redis.Client) error { return node.Ping(ctx).Err() }) Expect(err).NotTo(HaveOccurred()) var stack []string client.AddHook(&hook{ processPipelineHook: func(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook { return func(ctx context.Context, cmds []redis.Cmder) error { Expect(cmds).To(HaveLen(3)) Expect(cmds[1].String()).To(Equal("ping: ")) stack = append(stack, "cluster.BeforeProcessPipeline") err := hook(ctx, cmds) Expect(cmds).To(HaveLen(3)) Expect(cmds[1].String()).To(Equal("ping: PONG")) stack = 
append(stack, "cluster.AfterProcessPipeline") return err } }, }) _ = client.ForEachShard(ctx, func(ctx context.Context, node *redis.Client) error { node.AddHook(&hook{ processPipelineHook: func(hook redis.ProcessPipelineHook) redis.ProcessPipelineHook { return func(ctx context.Context, cmds []redis.Cmder) error { Expect(cmds).To(HaveLen(3)) Expect(cmds[1].String()).To(Equal("ping: ")) stack = append(stack, "shard.BeforeProcessPipeline") err := hook(ctx, cmds) Expect(cmds).To(HaveLen(3)) Expect(cmds[1].String()).To(Equal("ping: PONG")) stack = append(stack, "shard.AfterProcessPipeline") return err } }, }) return nil }) _, err = client.TxPipelined(ctx, func(pipe redis.Pipeliner) error { pipe.Ping(ctx) return nil }) Expect(err).NotTo(HaveOccurred()) Expect(stack).To(Equal([]string{ "cluster.BeforeProcessPipeline", "shard.BeforeProcessPipeline", "shard.AfterProcessPipeline", "cluster.AfterProcessPipeline", })) }) It("should return correct replica for key", func() { client, err := client.SlaveForKey(ctx, "test") Expect(err).ToNot(HaveOccurred()) info := client.Info(ctx, "server") Expect(info.Val()).Should(ContainSubstring("tcp_port:16604")) }) It("should return correct master for key", func() { client, err := client.MasterForKey(ctx, "test") Expect(err).ToNot(HaveOccurred()) info := client.Info(ctx, "server") Expect(info.Val()).Should(ContainSubstring("tcp_port:16601")) }) assertClusterClient() }) Describe("ClusterClient with RouteByLatency", func() { BeforeEach(func() { opt = redisClusterOptions() opt.RouteByLatency = true client = cluster.newClusterClient(ctx, opt) err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error { return master.FlushDB(ctx).Err() }) Expect(err).NotTo(HaveOccurred()) err = client.ForEachSlave(ctx, func(ctx context.Context, slave *redis.Client) error { Eventually(func() int64 { return client.DBSize(ctx).Val() }, 30*time.Second).Should(Equal(int64(0))) return nil }) Expect(err).NotTo(HaveOccurred()) }) AfterEach(func() { err := client.ForEachSlave(ctx, func(ctx context.Context, slave *redis.Client) error { return slave.ReadWrite(ctx).Err() }) Expect(err).NotTo(HaveOccurred()) err = client.Close() Expect(err).NotTo(HaveOccurred()) }) assertClusterClient() }) Describe("ClusterClient with ClusterSlots", func() { BeforeEach(func() { failover = true opt = redisClusterOptions() opt.ClusterSlots = func(ctx context.Context) ([]redis.ClusterSlot, error) { slots := []redis.ClusterSlot{{ Start: 0, End: 5460, Nodes: []redis.ClusterNode{{ Addr: ":" + ringShard1Port, }}, }, { Start: 5461, End: 10922, Nodes: []redis.ClusterNode{{ Addr: ":" + ringShard2Port, }}, }, { Start: 10923, End: 16383, Nodes: []redis.ClusterNode{{ Addr: ":" + ringShard3Port, }}, }} return slots, nil } client = cluster.newClusterClient(ctx, opt) err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error { return master.FlushDB(ctx).Err() }) Expect(err).NotTo(HaveOccurred()) err = client.ForEachSlave(ctx, func(ctx context.Context, slave *redis.Client) error { Eventually(func() int64 { return client.DBSize(ctx).Val() }, 30*time.Second).Should(Equal(int64(0))) return nil }) Expect(err).NotTo(HaveOccurred()) }) AfterEach(func() { failover = false err := client.Close() Expect(err).NotTo(HaveOccurred()) }) assertClusterClient() }) Describe("ClusterClient with RouteRandomly and ClusterSlots", func() { BeforeEach(func() { failover = true opt = redisClusterOptions() opt.RouteRandomly = true opt.ClusterSlots = func(ctx context.Context) ([]redis.ClusterSlot, error) { 
slots := []redis.ClusterSlot{{ Start: 0, End: 5460, Nodes: []redis.ClusterNode{{ Addr: ":" + ringShard1Port, }}, }, { Start: 5461, End: 10922, Nodes: []redis.ClusterNode{{ Addr: ":" + ringShard2Port, }}, }, { Start: 10923, End: 16383, Nodes: []redis.ClusterNode{{ Addr: ":" + ringShard3Port, }}, }} return slots, nil } client = cluster.newClusterClient(ctx, opt) err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error { return master.FlushDB(ctx).Err() }) Expect(err).NotTo(HaveOccurred()) err = client.ForEachSlave(ctx, func(ctx context.Context, slave *redis.Client) error { Eventually(func() int64 { return client.DBSize(ctx).Val() }, 30*time.Second).Should(Equal(int64(0))) return nil }) Expect(err).NotTo(HaveOccurred()) }) AfterEach(func() { failover = false err := client.Close() Expect(err).NotTo(HaveOccurred()) }) assertClusterClient() }) Describe("ClusterClient with ClusterSlots with multiple nodes per slot", func() { BeforeEach(func() { failover = true opt = redisClusterOptions() opt.ReadOnly = true opt.ClusterSlots = func(ctx context.Context) ([]redis.ClusterSlot, error) { slots := []redis.ClusterSlot{{ Start: 0, End: 5460, Nodes: []redis.ClusterNode{{ Addr: ":16600", }, { Addr: ":16603", }}, }, { Start: 5461, End: 10922, Nodes: []redis.ClusterNode{{ Addr: ":16601", }, { Addr: ":16604", }}, }, { Start: 10923, End: 16383, Nodes: []redis.ClusterNode{{ Addr: ":16602", }, { Addr: ":16605", }}, }} return slots, nil } client = cluster.newClusterClient(ctx, opt) err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error { return master.FlushDB(ctx).Err() }) Expect(err).NotTo(HaveOccurred()) err = client.ForEachSlave(ctx, func(ctx context.Context, slave *redis.Client) error { Eventually(func() int64 { return client.DBSize(ctx).Val() }, 30*time.Second).Should(Equal(int64(0))) return nil }) Expect(err).NotTo(HaveOccurred()) }) AfterEach(func() { failover = false err := client.Close() Expect(err).NotTo(HaveOccurred()) }) assertClusterClient() }) }) var _ = Describe("ClusterClient without nodes", func() { var client *redis.ClusterClient BeforeEach(func() { client = redis.NewClusterClient(&redis.ClusterOptions{}) }) AfterEach(func() { Expect(client.Close()).NotTo(HaveOccurred()) }) It("Ping returns an error", func() { err := client.Ping(ctx).Err() Expect(err).To(MatchError("redis: cluster has no nodes")) }) It("pipeline returns an error", func() { _, err := client.Pipelined(ctx, func(pipe redis.Pipeliner) error { pipe.Ping(ctx) return nil }) Expect(err).To(MatchError("redis: cluster has no nodes")) }) }) var _ = Describe("ClusterClient without valid nodes", func() { var client *redis.ClusterClient BeforeEach(func() { client = redis.NewClusterClient(&redis.ClusterOptions{ Addrs: []string{redisAddr}, }) }) AfterEach(func() { Expect(client.Close()).NotTo(HaveOccurred()) }) It("returns an error", func() { err := client.Ping(ctx).Err() Expect(err).To(MatchError("ERR This instance has cluster support disabled")) }) It("pipeline returns an error", func() { _, err := client.Pipelined(ctx, func(pipe redis.Pipeliner) error { pipe.Ping(ctx) return nil }) Expect(err).To(MatchError("ERR This instance has cluster support disabled")) }) }) var _ = Describe("ClusterClient with unavailable Cluster", func() { var client *redis.ClusterClient BeforeEach(func() { opt := redisClusterOptions() opt.ReadTimeout = 250 * time.Millisecond opt.WriteTimeout = 250 * time.Millisecond opt.MaxRedirects = 1 client = cluster.newClusterClientUnstable(opt) 
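// Confirm the cluster answers first, then pause every node for five seconds so
// that commands fail until the pause expires; the test below relies on this to
// observe the client recovering once the cluster does.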
Expect(client.Ping(ctx).Err()).NotTo(HaveOccurred()) for _, node := range cluster.clients { err := node.ClientPause(ctx, 5*time.Second).Err() Expect(err).NotTo(HaveOccurred()) } }) AfterEach(func() { Expect(client.Close()).NotTo(HaveOccurred()) }) It("recovers when Cluster recovers", func() { err := client.Ping(ctx).Err() Expect(err).To(HaveOccurred()) Eventually(func() error { return client.Ping(ctx).Err() }, "30s").ShouldNot(HaveOccurred()) }) }) var _ = Describe("ClusterClient timeout", func() { var client *redis.ClusterClient AfterEach(func() { _ = client.Close() }) testTimeout := func() { It("Ping timeouts", func() { err := client.Ping(ctx).Err() Expect(err).To(HaveOccurred()) Expect(err.(net.Error).Timeout()).To(BeTrue()) }) It("Pipeline timeouts", func() { _, err := client.Pipelined(ctx, func(pipe redis.Pipeliner) error { pipe.Ping(ctx) return nil }) Expect(err).To(HaveOccurred()) Expect(err.(net.Error).Timeout()).To(BeTrue()) }) It("Tx timeouts", func() { err := client.Watch(ctx, func(tx *redis.Tx) error { return tx.Ping(ctx).Err() }, "foo") Expect(err).To(HaveOccurred()) Expect(err.(net.Error).Timeout()).To(BeTrue()) }) It("Tx Pipeline timeouts", func() { err := client.Watch(ctx, func(tx *redis.Tx) error { _, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { pipe.Ping(ctx) return nil }) return err }, "foo") Expect(err).To(HaveOccurred()) Expect(err.(net.Error).Timeout()).To(BeTrue()) }) } const pause = 5 * time.Second Context("read/write timeout", func() { BeforeEach(func() { opt := redisClusterOptions() client = cluster.newClusterClient(ctx, opt) err := client.ForEachShard(ctx, func(ctx context.Context, client *redis.Client) error { err := client.ClientPause(ctx, pause).Err() opt := client.Options() opt.ReadTimeout = time.Nanosecond opt.WriteTimeout = time.Nanosecond return err }) Expect(err).NotTo(HaveOccurred()) // Overwrite timeouts after the client is initialized. 
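// Every shard has just been paused and given nanosecond read/write timeouts, so
// the cluster-level options are overwritten the same way here; MaxRedirects = 0
// keeps the client from retrying elsewhere, letting the timeout reach the caller.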
opt.ReadTimeout = time.Nanosecond opt.WriteTimeout = time.Nanosecond opt.MaxRedirects = 0 }) AfterEach(func() { _ = client.ForEachShard(ctx, func(ctx context.Context, client *redis.Client) error { defer GinkgoRecover() opt := client.Options() opt.ReadTimeout = time.Second opt.WriteTimeout = time.Second Eventually(func() error { return client.Ping(ctx).Err() }, 2*pause).ShouldNot(HaveOccurred()) return nil }) err := client.Close() Expect(err).NotTo(HaveOccurred()) }) testTimeout() }) }) var _ = Describe("ClusterClient ParseURL", func() { cases := []struct { test string url string o *redis.ClusterOptions // expected value err error }{ { test: "ParseRedisURL", url: "redis://localhost:123", o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}}, }, { test: "ParseRedissURL", url: "rediss://localhost:123", o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, TLSConfig: &tls.Config{ServerName: "localhost"}}, }, { test: "MissingRedisPort", url: "redis://localhost", o: &redis.ClusterOptions{Addrs: []string{"localhost:6379"}}, }, { test: "MissingRedissPort", url: "rediss://localhost", o: &redis.ClusterOptions{Addrs: []string{"localhost:6379"}, TLSConfig: &tls.Config{ServerName: "localhost"}}, }, { test: "MultipleRedisURLs", url: "redis://localhost:123?addr=localhost:1234&addr=localhost:12345", o: &redis.ClusterOptions{Addrs: []string{"localhost:123", "localhost:1234", "localhost:12345"}}, }, { test: "MultipleRedissURLs", url: "rediss://localhost:123?addr=localhost:1234&addr=localhost:12345", o: &redis.ClusterOptions{Addrs: []string{"localhost:123", "localhost:1234", "localhost:12345"}, TLSConfig: &tls.Config{ServerName: "localhost"}}, }, { test: "OnlyPassword", url: "redis://:bar@localhost:123", o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, Password: "bar"}, }, { test: "OnlyUser", url: "redis://foo@localhost:123", o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, Username: "foo"}, }, { test: "RedisUsernamePassword", url: "redis://foo:bar@localhost:123", o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, Username: "foo", Password: "bar"}, }, { test: "RedissUsernamePassword", url: "rediss://foo:bar@localhost:123?addr=localhost:1234", o: &redis.ClusterOptions{Addrs: []string{"localhost:123", "localhost:1234"}, Username: "foo", Password: "bar", TLSConfig: &tls.Config{ServerName: "localhost"}}, }, { test: "QueryParameters", url: "redis://localhost:123?read_timeout=2&pool_fifo=true&addr=localhost:1234", o: &redis.ClusterOptions{Addrs: []string{"localhost:123", "localhost:1234"}, ReadTimeout: 2 * time.Second, PoolFIFO: true}, }, { test: "DisabledTimeout", url: "redis://localhost:123?conn_max_idle_time=0", o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, ConnMaxIdleTime: -1}, }, { test: "DisabledTimeoutNeg", url: "redis://localhost:123?conn_max_idle_time=-1", o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, ConnMaxIdleTime: -1}, }, { test: "UseDefault", url: "redis://localhost:123?conn_max_idle_time=", o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, ConnMaxIdleTime: 0}, }, { test: "Protocol", url: "redis://localhost:123?protocol=2", o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, Protocol: 2}, }, { test: "ClientName", url: "redis://localhost:123?client_name=cluster_hi", o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, ClientName: "cluster_hi"}, }, { test: "UseDefaultMissing=", url: "redis://localhost:123?conn_max_idle_time", o: &redis.ClusterOptions{Addrs: []string{"localhost:123"}, ConnMaxIdleTime: 0}, }, { test: 
"InvalidQueryAddr", url: "rediss://foo:bar@localhost:123?addr=rediss://foo:barr@localhost:1234", err: errors.New(`redis: unable to parse addr param: rediss://foo:barr@localhost:1234`), }, { test: "InvalidInt", url: "redis://localhost?pool_size=five", err: errors.New(`redis: invalid pool_size number: strconv.Atoi: parsing "five": invalid syntax`), }, { test: "InvalidBool", url: "redis://localhost?pool_fifo=yes", err: errors.New(`redis: invalid pool_fifo boolean: expected true/false/1/0 or an empty string, got "yes"`), }, { test: "UnknownParam", url: "redis://localhost?abc=123", err: errors.New("redis: unexpected option: abc"), }, { test: "InvalidScheme", url: "https://google.com", err: errors.New("redis: invalid URL scheme: https"), }, } It("match ParseClusterURL", func() { for i := range cases { tc := cases[i] actual, err := redis.ParseClusterURL(tc.url) if tc.err != nil { Expect(err).Should(MatchError(tc.err)) } else { Expect(err).NotTo(HaveOccurred()) } if err == nil { Expect(tc.o).NotTo(BeNil()) Expect(tc.o.Addrs).To(Equal(actual.Addrs)) Expect(tc.o.TLSConfig).To(Equal(actual.TLSConfig)) Expect(tc.o.Username).To(Equal(actual.Username)) Expect(tc.o.Password).To(Equal(actual.Password)) Expect(tc.o.MaxRetries).To(Equal(actual.MaxRetries)) Expect(tc.o.MinRetryBackoff).To(Equal(actual.MinRetryBackoff)) Expect(tc.o.MaxRetryBackoff).To(Equal(actual.MaxRetryBackoff)) Expect(tc.o.DialTimeout).To(Equal(actual.DialTimeout)) Expect(tc.o.ReadTimeout).To(Equal(actual.ReadTimeout)) Expect(tc.o.WriteTimeout).To(Equal(actual.WriteTimeout)) Expect(tc.o.PoolFIFO).To(Equal(actual.PoolFIFO)) Expect(tc.o.PoolSize).To(Equal(actual.PoolSize)) Expect(tc.o.MinIdleConns).To(Equal(actual.MinIdleConns)) Expect(tc.o.ConnMaxLifetime).To(Equal(actual.ConnMaxLifetime)) Expect(tc.o.ConnMaxIdleTime).To(Equal(actual.ConnMaxIdleTime)) Expect(tc.o.PoolTimeout).To(Equal(actual.PoolTimeout)) } } }) }) dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.10.0/RELEASE-NOTES.md0000644000000000000000000002676115024302467024221 0ustar rootroot# Release Notes # 9.10.0 (2025-06-06) ## 🚀 Highlights `go-redis` now supports [vector sets](https://redis.io/docs/latest/develop/data-types/vector-sets/). This data type is marked as "in preview" in Redis and its support in `go-redis` is marked as experimental. You can find examples in the documentation and in the `doctests` folder. # Changes ## 🚀 New Features - feat: support vectorset ([#3375](https://github.com/redis/go-redis/pull/3375)) ## 🧰 Maintenance - Add the missing NewFloatSliceResult for testing ([#3393](https://github.com/redis/go-redis/pull/3393)) - DOC-5078 vector set examples ([#3394](https://github.com/redis/go-redis/pull/3394)) ## Contributors We'd like to thank all the contributors who worked on this release! 
[@AndBobsYourUncle](https://github.com/AndBobsYourUncle), [@andy-stark-redis](https://github.com/andy-stark-redis), [@fukua95](https://github.com/fukua95) and [@ndyakov](https://github.com/ndyakov) # 9.9.0 (2025-05-27) ## 🚀 Highlights - **Token-based Authentication**: Added `StreamingCredentialsProvider` for dynamic credential updates (experimental) - Can be used with [go-redis-entraid](https://github.com/redis/go-redis-entraid) for Azure AD authentication - **Connection Statistics**: Added connection waiting statistics for better monitoring - **Failover Improvements**: Added `ParseFailoverURL` for easier failover configuration - **Ring Client Enhancements**: Added shard access methods for better Pub/Sub management ## ✨ New Features - Added `StreamingCredentialsProvider` for token-based authentication ([#3320](https://github.com/redis/go-redis/pull/3320)) - Supports dynamic credential updates - Includes connection close hooks - Note: Currently marked as experimental - Added `ParseFailoverURL` for parsing failover URLs ([#3362](https://github.com/redis/go-redis/pull/3362)) - Added connection waiting statistics ([#2804](https://github.com/redis/go-redis/pull/2804)) - Added new utility functions: - `ParseFloat` and `MustParseFloat` in public utils package ([#3371](https://github.com/redis/go-redis/pull/3371)) - Unit tests for `Atoi`, `ParseInt`, `ParseUint`, and `ParseFloat` ([#3377](https://github.com/redis/go-redis/pull/3377)) - Added Ring client shard access methods: - `GetShardClients()` to retrieve all active shard clients - `GetShardClientForKey(key string)` to get the shard client for a specific key ([#3388](https://github.com/redis/go-redis/pull/3388)) ## 🐛 Bug Fixes - Fixed routing reads to loading slave nodes ([#3370](https://github.com/redis/go-redis/pull/3370)) - Added support for nil lag in XINFO GROUPS ([#3369](https://github.com/redis/go-redis/pull/3369)) - Fixed pool acquisition timeout issues ([#3381](https://github.com/redis/go-redis/pull/3381)) - Optimized unnecessary copy operations ([#3376](https://github.com/redis/go-redis/pull/3376)) ## 📚 Documentation - Updated documentation for XINFO GROUPS with nil lag support ([#3369](https://github.com/redis/go-redis/pull/3369)) - Added package-level comments for new features ## ⚡ Performance and Reliability - Optimized `ReplaceSpaces` function ([#3383](https://github.com/redis/go-redis/pull/3383)) - Set default value for `Options.Protocol` in `init()` ([#3387](https://github.com/redis/go-redis/pull/3387)) - Exported pool errors for public consumption ([#3380](https://github.com/redis/go-redis/pull/3380)) ## 🔧 Dependencies and Infrastructure - Updated Redis CI to version 8.0.1 ([#3372](https://github.com/redis/go-redis/pull/3372)) - Updated spellcheck GitHub Actions ([#3389](https://github.com/redis/go-redis/pull/3389)) - Removed unused parameters ([#3382](https://github.com/redis/go-redis/pull/3382), [#3384](https://github.com/redis/go-redis/pull/3384)) ## 🧪 Testing - Added unit tests for pool acquisition timeout ([#3381](https://github.com/redis/go-redis/pull/3381)) - Added unit tests for utility functions ([#3377](https://github.com/redis/go-redis/pull/3377)) ## 👥 Contributors We would like to thank all the contributors who made this release possible: [@ndyakov](https://github.com/ndyakov), [@ofekshenawa](https://github.com/ofekshenawa), [@LINKIWI](https://github.com/LINKIWI), [@iamamirsalehi](https://github.com/iamamirsalehi), [@fukua95](https://github.com/fukua95), [@lzakharov](https://github.com/lzakharov), 
[@DengY11](https://github.com/DengY11) ## 📝 Changelog For a complete list of changes, see the [full changelog](https://github.com/redis/go-redis/compare/v9.8.0...v9.9.0). # 9.8.0 (2025-04-30) ## 🚀 Highlights - **Redis 8 Support**: Full compatibility with Redis 8.0, including testing and CI integration - **Enhanced Hash Operations**: Added support for new hash commands (`HGETDEL`, `HGETEX`, `HSETEX`) and `HSTRLEN` command - **Search Improvements**: Enabled Search DIALECT 2 by default and added `CountOnly` argument for `FT.Search` ## ✨ New Features - Added support for new hash commands: `HGETDEL`, `HGETEX`, `HSETEX` ([#3305](https://github.com/redis/go-redis/pull/3305)) - Added `HSTRLEN` command for hash operations ([#2843](https://github.com/redis/go-redis/pull/2843)) - Added `Do` method for raw query by single connection from `pool.Conn()` ([#3182](https://github.com/redis/go-redis/pull/3182)) - Prevent false-positive marshaling by treating zero time.Time as empty in isEmptyValue ([#3273](https://github.com/redis/go-redis/pull/3273)) - Added FailoverClusterClient support for Universal client ([#2794](https://github.com/redis/go-redis/pull/2794)) - Added support for cluster mode with `IsClusterMode` config parameter ([#3255](https://github.com/redis/go-redis/pull/3255)) - Added client name support in `HELLO` RESP handshake ([#3294](https://github.com/redis/go-redis/pull/3294)) - **Enabled Search DIALECT 2 by default** ([#3213](https://github.com/redis/go-redis/pull/3213)) - Added read-only option for failover configurations ([#3281](https://github.com/redis/go-redis/pull/3281)) - Added `CountOnly` argument for `FT.Search` to use `LIMIT 0 0` ([#3338](https://github.com/redis/go-redis/pull/3338)) - Added `DB` option support in `NewFailoverClusterClient` ([#3342](https://github.com/redis/go-redis/pull/3342)) - Added `nil` check for the options when creating a client ([#3363](https://github.com/redis/go-redis/pull/3363)) ## 🐛 Bug Fixes - Fixed `PubSub` concurrency safety issues ([#3360](https://github.com/redis/go-redis/pull/3360)) - Fixed panic caused when argument is `nil` ([#3353](https://github.com/redis/go-redis/pull/3353)) - Improved error handling when fetching master node from sentinels ([#3349](https://github.com/redis/go-redis/pull/3349)) - Fixed connection pool timeout issues and increased retries ([#3298](https://github.com/redis/go-redis/pull/3298)) - Fixed context cancellation error leading to connection spikes on Primary instances ([#3190](https://github.com/redis/go-redis/pull/3190)) - Fixed RedisCluster client to consider `MASTERDOWN` a retriable error ([#3164](https://github.com/redis/go-redis/pull/3164)) - Fixed tracing to show complete commands instead of truncated versions ([#3290](https://github.com/redis/go-redis/pull/3290)) - Fixed OpenTelemetry instrumentation to prevent multiple span reporting ([#3168](https://github.com/redis/go-redis/pull/3168)) - Fixed `FT.Search` Limit argument and added `CountOnly` argument for limit 0 0 ([#3338](https://github.com/redis/go-redis/pull/3338)) - Fixed missing command in interface ([#3344](https://github.com/redis/go-redis/pull/3344)) - Fixed slot calculation for `COUNTKEYSINSLOT` command ([#3327](https://github.com/redis/go-redis/pull/3327)) - Updated PubSub implementation with correct context ([#3329](https://github.com/redis/go-redis/pull/3329)) ## 📚 Documentation - Added hash search examples ([#3357](https://github.com/redis/go-redis/pull/3357)) - Fixed documentation comments ([#3351](https://github.com/redis/go-redis/pull/3351)) 
- Added `CountOnly` search example ([#3345](https://github.com/redis/go-redis/pull/3345)) - Added examples for list commands: `LLEN`, `LPOP`, `LPUSH`, `LRANGE`, `RPOP`, `RPUSH` ([#3234](https://github.com/redis/go-redis/pull/3234)) - Added `SADD` and `SMEMBERS` command examples ([#3242](https://github.com/redis/go-redis/pull/3242)) - Updated `README.md` to use Redis Discord guild ([#3331](https://github.com/redis/go-redis/pull/3331)) - Updated `HExpire` command documentation ([#3355](https://github.com/redis/go-redis/pull/3355)) - Featured OpenTelemetry instrumentation more prominently ([#3316](https://github.com/redis/go-redis/pull/3316)) - Updated `README.md` with additional information ([#310ce55](https://github.com/redis/go-redis/commit/310ce55)) ## ⚡ Performance and Reliability - Bound connection pool background dials to configured dial timeout ([#3089](https://github.com/redis/go-redis/pull/3089)) - Ensured context isn't exhausted via concurrent query ([#3334](https://github.com/redis/go-redis/pull/3334)) ## 🔧 Dependencies and Infrastructure - Updated testing image to Redis 8.0-RC2 ([#3361](https://github.com/redis/go-redis/pull/3361)) - Enabled CI for Redis CE 8.0 ([#3274](https://github.com/redis/go-redis/pull/3274)) - Updated various dependencies: - Bumped golangci/golangci-lint-action from 6.5.0 to 7.0.0 ([#3354](https://github.com/redis/go-redis/pull/3354)) - Bumped rojopolis/spellcheck-github-actions ([#3336](https://github.com/redis/go-redis/pull/3336)) - Bumped golang.org/x/net in example/otel ([#3308](https://github.com/redis/go-redis/pull/3308)) - Migrated golangci-lint configuration to v2 format ([#3354](https://github.com/redis/go-redis/pull/3354)) ## ⚠️ Breaking Changes - **Enabled Search DIALECT 2 by default** ([#3213](https://github.com/redis/go-redis/pull/3213)) - Dropped RedisGears (Triggers and Functions) support ([#3321](https://github.com/redis/go-redis/pull/3321)) - Dropped FT.PROFILE command that was never enabled ([#3323](https://github.com/redis/go-redis/pull/3323)) ## 🔒 Security - Fixed network error handling on SETINFO (CVE-2025-29923) ([#3295](https://github.com/redis/go-redis/pull/3295)) ## 🧪 Testing - Added integration tests for Redis 8 behavior changes in Redis Search ([#3337](https://github.com/redis/go-redis/pull/3337)) - Added vector types INT8 and UINT8 tests ([#3299](https://github.com/redis/go-redis/pull/3299)) - Added test codes for search_commands.go ([#3285](https://github.com/redis/go-redis/pull/3285)) - Fixed example test sorting ([#3292](https://github.com/redis/go-redis/pull/3292)) ## 👥 Contributors We would like to thank all the contributors who made this release possible: [@alexander-menshchikov](https://github.com/alexander-menshchikov), [@EXPEbdodla](https://github.com/EXPEbdodla), [@afti](https://github.com/afti), [@dmaier-redislabs](https://github.com/dmaier-redislabs), [@four_leaf_clover](https://github.com/four_leaf_clover), [@alohaglenn](https://github.com/alohaglenn), [@gh73962](https://github.com/gh73962), [@justinmir](https://github.com/justinmir), [@LINKIWI](https://github.com/LINKIWI), [@liushuangbill](https://github.com/liushuangbill), [@golang88](https://github.com/golang88), [@gnpaone](https://github.com/gnpaone), [@ndyakov](https://github.com/ndyakov), [@nikolaydubina](https://github.com/nikolaydubina), [@oleglacto](https://github.com/oleglacto), [@andy-stark-redis](https://github.com/andy-stark-redis), [@rodneyosodo](https://github.com/rodneyosodo), [@dependabot](https://github.com/dependabot), 
[@rfyiamcool](https://github.com/rfyiamcool), [@frankxjkuang](https://github.com/frankxjkuang), [@fukua95](https://github.com/fukua95), [@soleymani-milad](https://github.com/soleymani-milad), [@ofekshenawa](https://github.com/ofekshenawa), [@khasanovbi](https://github.com/khasanovbi) dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.10.0/fuzz/0000755000000000000000000000000015024302467022713 5ustar rootrootdependencies/pkg/mod/github.com/redis/go-redis/v9@v9.10.0/fuzz/fuzz.go0000644000000000000000000000166115024302467024244 0ustar rootroot//go:build gofuzz // +build gofuzz package fuzz import ( "context" "time" "github.com/redis/go-redis/v9" ) var ( ctx = context.Background() rdb *redis.Client ) func init() { rdb = redis.NewClient(&redis.Options{ Addr: ":6379", DialTimeout: 10 * time.Second, ReadTimeout: 10 * time.Second, WriteTimeout: 10 * time.Second, PoolSize: 10, PoolTimeout: 10 * time.Second, }) } func Fuzz(data []byte) int { arrayLen := len(data) if arrayLen < 4 { return -1 } maxIter := int(uint(data[0])) for i := 0; i < maxIter && i < arrayLen; i++ { n := i % arrayLen if n == 0 { _ = rdb.Set(ctx, string(data[i:]), string(data[i:]), 0).Err() } else if n == 1 { _, _ = rdb.Get(ctx, string(data[i:])).Result() } else if n == 2 { _, _ = rdb.Incr(ctx, string(data[i:])).Result() } else if n == 3 { var cursor uint64 _, _, _ = rdb.Scan(ctx, cursor, string(data[i:]), 10).Result() } } return 1 } dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.10.0/go.mod0000644000000000000000000000076215024302467023030 0ustar rootrootmodule github.com/redis/go-redis/v9 go 1.18 require ( github.com/bsm/ginkgo/v2 v2.12.0 github.com/bsm/gomega v1.27.10 github.com/cespare/xxhash/v2 v2.3.0 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f ) retract ( v9.7.2 // This version was accidentally released. Please use version 9.7.3 instead. v9.5.4 // This version was accidentally released. Please use version 9.6.0 instead. v9.5.3 // This version was accidentally released. Please use version 9.6.0 instead. 
) dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.10.0/stream_commands.go0000644000000000000000000003153415024302467025426 0ustar rootrootpackage redis import ( "context" "time" ) type StreamCmdable interface { XAdd(ctx context.Context, a *XAddArgs) *StringCmd XDel(ctx context.Context, stream string, ids ...string) *IntCmd XLen(ctx context.Context, stream string) *IntCmd XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd XRevRange(ctx context.Context, stream string, start, stop string) *XMessageSliceCmd XRevRangeN(ctx context.Context, stream string, start, stop string, count int64) *XMessageSliceCmd XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd XGroupDestroy(ctx context.Context, stream, group string) *IntCmd XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd XPending(ctx context.Context, stream, group string) *XPendingCmd XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd XTrimMinID(ctx context.Context, key string, minID string) *IntCmd XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd XInfoStream(ctx context.Context, key string) *XInfoStreamCmd XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd } // XAddArgs accepts values in the following formats: // - XAddArgs.Values = []interface{}{"key1", "value1", "key2", "value2"} // - XAddArgs.Values = []string("key1", "value1", "key2", "value2") // - XAddArgs.Values = map[string]interface{}{"key1": "value1", "key2": "value2"} // // Note that map will not preserve the order of key-value pairs. // MaxLen/MaxLenApprox and MinID are in conflict, only one of them can be used. type XAddArgs struct { Stream string NoMkStream bool MaxLen int64 // MAXLEN N MinID string // Approx causes MaxLen and MinID to use "~" matcher (instead of "="). 
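// With Approx set, trimming may stop at whole stream macro-nodes, which is
// cheaper than trimming to the exact length. A minimal caller-side sketch
// (rdb is an assumed *redis.Client, ctx an assumed context.Context):
//
//	rdb.XAdd(ctx, &redis.XAddArgs{
//		Stream: "events",
//		MaxLen: 1000,
//		Approx: true,
//		Values: map[string]interface{}{"temp": "22.5"},
//	})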
Approx bool Limit int64 ID string Values interface{} } func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd { args := make([]interface{}, 0, 11) args = append(args, "xadd", a.Stream) if a.NoMkStream { args = append(args, "nomkstream") } switch { case a.MaxLen > 0: if a.Approx { args = append(args, "maxlen", "~", a.MaxLen) } else { args = append(args, "maxlen", a.MaxLen) } case a.MinID != "": if a.Approx { args = append(args, "minid", "~", a.MinID) } else { args = append(args, "minid", a.MinID) } } if a.Limit > 0 { args = append(args, "limit", a.Limit) } if a.ID != "" { args = append(args, a.ID) } else { args = append(args, "*") } args = appendArg(args, a.Values) cmd := NewStringCmd(ctx, args...) _ = c(ctx, cmd) return cmd } func (c cmdable) XDel(ctx context.Context, stream string, ids ...string) *IntCmd { args := []interface{}{"xdel", stream} for _, id := range ids { args = append(args, id) } cmd := NewIntCmd(ctx, args...) _ = c(ctx, cmd) return cmd } func (c cmdable) XLen(ctx context.Context, stream string) *IntCmd { cmd := NewIntCmd(ctx, "xlen", stream) _ = c(ctx, cmd) return cmd } func (c cmdable) XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd { cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop) _ = c(ctx, cmd) return cmd } func (c cmdable) XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd { cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop, "count", count) _ = c(ctx, cmd) return cmd } func (c cmdable) XRevRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd { cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop) _ = c(ctx, cmd) return cmd } func (c cmdable) XRevRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd { cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop, "count", count) _ = c(ctx, cmd) return cmd } type XReadArgs struct { Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2 Count int64 Block time.Duration ID string } func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd { args := make([]interface{}, 0, 2*len(a.Streams)+6) args = append(args, "xread") keyPos := int8(1) if a.Count > 0 { args = append(args, "count") args = append(args, a.Count) keyPos += 2 } if a.Block >= 0 { args = append(args, "block") args = append(args, int64(a.Block/time.Millisecond)) keyPos += 2 } args = append(args, "streams") keyPos++ for _, s := range a.Streams { args = append(args, s) } if a.ID != "" { for range a.Streams { args = append(args, a.ID) } } cmd := NewXStreamSliceCmd(ctx, args...) 
if a.Block >= 0 { cmd.setReadTimeout(a.Block) } cmd.SetFirstKeyPos(keyPos) _ = c(ctx, cmd) return cmd } func (c cmdable) XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd { return c.XRead(ctx, &XReadArgs{ Streams: streams, Block: -1, }) } func (c cmdable) XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd { cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start) cmd.SetFirstKeyPos(2) _ = c(ctx, cmd) return cmd } func (c cmdable) XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd { cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start, "mkstream") cmd.SetFirstKeyPos(2) _ = c(ctx, cmd) return cmd } func (c cmdable) XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd { cmd := NewStatusCmd(ctx, "xgroup", "setid", stream, group, start) cmd.SetFirstKeyPos(2) _ = c(ctx, cmd) return cmd } func (c cmdable) XGroupDestroy(ctx context.Context, stream, group string) *IntCmd { cmd := NewIntCmd(ctx, "xgroup", "destroy", stream, group) cmd.SetFirstKeyPos(2) _ = c(ctx, cmd) return cmd } func (c cmdable) XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd { cmd := NewIntCmd(ctx, "xgroup", "createconsumer", stream, group, consumer) cmd.SetFirstKeyPos(2) _ = c(ctx, cmd) return cmd } func (c cmdable) XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd { cmd := NewIntCmd(ctx, "xgroup", "delconsumer", stream, group, consumer) cmd.SetFirstKeyPos(2) _ = c(ctx, cmd) return cmd } type XReadGroupArgs struct { Group string Consumer string Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2 Count int64 Block time.Duration NoAck bool } func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd { args := make([]interface{}, 0, 10+len(a.Streams)) args = append(args, "xreadgroup", "group", a.Group, a.Consumer) keyPos := int8(4) if a.Count > 0 { args = append(args, "count", a.Count) keyPos += 2 } if a.Block >= 0 { args = append(args, "block", int64(a.Block/time.Millisecond)) keyPos += 2 } if a.NoAck { args = append(args, "noack") keyPos++ } args = append(args, "streams") keyPos++ for _, s := range a.Streams { args = append(args, s) } cmd := NewXStreamSliceCmd(ctx, args...) if a.Block >= 0 { cmd.setReadTimeout(a.Block) } cmd.SetFirstKeyPos(keyPos) _ = c(ctx, cmd) return cmd } func (c cmdable) XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd { args := []interface{}{"xack", stream, group} for _, id := range ids { args = append(args, id) } cmd := NewIntCmd(ctx, args...) _ = c(ctx, cmd) return cmd } func (c cmdable) XPending(ctx context.Context, stream, group string) *XPendingCmd { cmd := NewXPendingCmd(ctx, "xpending", stream, group) _ = c(ctx, cmd) return cmd } type XPendingExtArgs struct { Stream string Group string Idle time.Duration Start string End string Count int64 Consumer string } func (c cmdable) XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd { args := make([]interface{}, 0, 9) args = append(args, "xpending", a.Stream, a.Group) if a.Idle != 0 { args = append(args, "idle", formatMs(ctx, a.Idle)) } args = append(args, a.Start, a.End, a.Count) if a.Consumer != "" { args = append(args, a.Consumer) } cmd := NewXPendingExtCmd(ctx, args...) 
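// Example: a caller-side sketch of a consumer-group read with XReadGroup and a
// follow-up XAck (illustrative only; the group, consumer and stream names are
// made up, and rdb/ctx are assumed to exist):
//
//	streams, err := rdb.XReadGroup(ctx, &redis.XReadGroupArgs{
//		Group:    "mygroup",
//		Consumer: "consumer-1",
//		Streams:  []string{"mystream", ">"}, // stream names first, then IDs
//		Count:    10,
//		Block:    5 * time.Second,
//	}).Result()
//	if err != nil {
//		// handle error (redis.Nil signals the block timed out with no entries)
//	}
//	for _, s := range streams {
//		for _, msg := range s.Messages {
//			_ = rdb.XAck(ctx, "mystream", "mygroup", msg.ID).Err()
//		}
//	}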
_ = c(ctx, cmd) return cmd } type XAutoClaimArgs struct { Stream string Group string MinIdle time.Duration Start string Count int64 Consumer string } func (c cmdable) XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd { args := xAutoClaimArgs(ctx, a) cmd := NewXAutoClaimCmd(ctx, args...) _ = c(ctx, cmd) return cmd } func (c cmdable) XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd { args := xAutoClaimArgs(ctx, a) args = append(args, "justid") cmd := NewXAutoClaimJustIDCmd(ctx, args...) _ = c(ctx, cmd) return cmd } func xAutoClaimArgs(ctx context.Context, a *XAutoClaimArgs) []interface{} { args := make([]interface{}, 0, 8) args = append(args, "xautoclaim", a.Stream, a.Group, a.Consumer, formatMs(ctx, a.MinIdle), a.Start) if a.Count > 0 { args = append(args, "count", a.Count) } return args } type XClaimArgs struct { Stream string Group string Consumer string MinIdle time.Duration Messages []string } func (c cmdable) XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd { args := xClaimArgs(a) cmd := NewXMessageSliceCmd(ctx, args...) _ = c(ctx, cmd) return cmd } func (c cmdable) XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd { args := xClaimArgs(a) args = append(args, "justid") cmd := NewStringSliceCmd(ctx, args...) _ = c(ctx, cmd) return cmd } func xClaimArgs(a *XClaimArgs) []interface{} { args := make([]interface{}, 0, 5+len(a.Messages)) args = append(args, "xclaim", a.Stream, a.Group, a.Consumer, int64(a.MinIdle/time.Millisecond)) for _, id := range a.Messages { args = append(args, id) } return args } // xTrim If approx is true, add the "~" parameter, otherwise it is the default "=" (redis default). // example: // // XTRIM key MAXLEN/MINID threshold LIMIT limit. // XTRIM key MAXLEN/MINID ~ threshold LIMIT limit. // // The redis-server version is lower than 6.2, please set limit to 0. func (c cmdable) xTrim( ctx context.Context, key, strategy string, approx bool, threshold interface{}, limit int64, ) *IntCmd { args := make([]interface{}, 0, 7) args = append(args, "xtrim", key, strategy) if approx { args = append(args, "~") } args = append(args, threshold) if limit > 0 { args = append(args, "limit", limit) } cmd := NewIntCmd(ctx, args...) _ = c(ctx, cmd) return cmd } // XTrimMaxLen No `~` rules are used, `limit` cannot be used. // cmd: XTRIM key MAXLEN maxLen func (c cmdable) XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd { return c.xTrim(ctx, key, "maxlen", false, maxLen, 0) } func (c cmdable) XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd { return c.xTrim(ctx, key, "maxlen", true, maxLen, limit) } func (c cmdable) XTrimMinID(ctx context.Context, key string, minID string) *IntCmd { return c.xTrim(ctx, key, "minid", false, minID, 0) } func (c cmdable) XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd { return c.xTrim(ctx, key, "minid", true, minID, limit) } func (c cmdable) XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd { cmd := NewXInfoConsumersCmd(ctx, key, group) _ = c(ctx, cmd) return cmd } func (c cmdable) XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd { cmd := NewXInfoGroupsCmd(ctx, key) _ = c(ctx, cmd) return cmd } func (c cmdable) XInfoStream(ctx context.Context, key string) *XInfoStreamCmd { cmd := NewXInfoStreamCmd(ctx, key) _ = c(ctx, cmd) return cmd } // XInfoStreamFull XINFO STREAM FULL [COUNT count] // redis-server >= 6.0. 
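// Example: caller-side sketches of approximate trimming and full stream
// introspection as described above (illustrative only; the key name and counts
// are made up, and rdb/ctx are assumed to exist):
//
//	// XTRIM mystream MAXLEN ~ 1000; pass limit 0 on redis-server < 6.2,
//	// which does not support the LIMIT clause.
//	trimmed, err := rdb.XTrimMaxLenApprox(ctx, "mystream", 1000, 0).Result()
//
//	// XINFO STREAM mystream FULL COUNT 10 (redis-server >= 6.0).
//	info, err2 := rdb.XInfoStreamFull(ctx, "mystream", 10).Result()
//	_, _, _, _ = trimmed, err, info, err2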
func (c cmdable) XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd { args := make([]interface{}, 0, 6) args = append(args, "xinfo", "stream", key, "full") if count > 0 { args = append(args, "count", count) } cmd := NewXInfoStreamFullCmd(ctx, args...) _ = c(ctx, cmd) return cmd } dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.10.0/doc.go0000644000000000000000000000007515024302467023013 0ustar rootroot/* Package redis implements a Redis client. */ package redis dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.10.0/set_commands.go0000644000000000000000000001467215024302467024732 0ustar rootrootpackage redis import "context" type SetCmdable interface { SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd SCard(ctx context.Context, key string) *IntCmd SDiff(ctx context.Context, keys ...string) *StringSliceCmd SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd SInter(ctx context.Context, keys ...string) *StringSliceCmd SInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd SMembers(ctx context.Context, key string) *StringSliceCmd SMembersMap(ctx context.Context, key string) *StringStructMapCmd SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd SPop(ctx context.Context, key string) *StringCmd SPopN(ctx context.Context, key string, count int64) *StringSliceCmd SRandMember(ctx context.Context, key string) *StringCmd SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd SRem(ctx context.Context, key string, members ...interface{}) *IntCmd SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd SUnion(ctx context.Context, keys ...string) *StringSliceCmd SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd } //------------------------------------------------------------------------------ func (c cmdable) SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd { args := make([]interface{}, 2, 2+len(members)) args[0] = "sadd" args[1] = key args = appendArgs(args, members) cmd := NewIntCmd(ctx, args...) _ = c(ctx, cmd) return cmd } func (c cmdable) SCard(ctx context.Context, key string) *IntCmd { cmd := NewIntCmd(ctx, "scard", key) _ = c(ctx, cmd) return cmd } func (c cmdable) SDiff(ctx context.Context, keys ...string) *StringSliceCmd { args := make([]interface{}, 1+len(keys)) args[0] = "sdiff" for i, key := range keys { args[1+i] = key } cmd := NewStringSliceCmd(ctx, args...) _ = c(ctx, cmd) return cmd } func (c cmdable) SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd { args := make([]interface{}, 2+len(keys)) args[0] = "sdiffstore" args[1] = destination for i, key := range keys { args[2+i] = key } cmd := NewIntCmd(ctx, args...) _ = c(ctx, cmd) return cmd } func (c cmdable) SInter(ctx context.Context, keys ...string) *StringSliceCmd { args := make([]interface{}, 1+len(keys)) args[0] = "sinter" for i, key := range keys { args[1+i] = key } cmd := NewStringSliceCmd(ctx, args...) 
_ = c(ctx, cmd) return cmd } func (c cmdable) SInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd { args := make([]interface{}, 4+len(keys)) args[0] = "sintercard" numkeys := int64(0) for i, key := range keys { args[2+i] = key numkeys++ } args[1] = numkeys args[2+numkeys] = "limit" args[3+numkeys] = limit cmd := NewIntCmd(ctx, args...) _ = c(ctx, cmd) return cmd } func (c cmdable) SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd { args := make([]interface{}, 2+len(keys)) args[0] = "sinterstore" args[1] = destination for i, key := range keys { args[2+i] = key } cmd := NewIntCmd(ctx, args...) _ = c(ctx, cmd) return cmd } func (c cmdable) SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd { cmd := NewBoolCmd(ctx, "sismember", key, member) _ = c(ctx, cmd) return cmd } // SMIsMember Redis `SMISMEMBER key member [member ...]` command. func (c cmdable) SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd { args := make([]interface{}, 2, 2+len(members)) args[0] = "smismember" args[1] = key args = appendArgs(args, members) cmd := NewBoolSliceCmd(ctx, args...) _ = c(ctx, cmd) return cmd } // SMembers Redis `SMEMBERS key` command output as a slice. func (c cmdable) SMembers(ctx context.Context, key string) *StringSliceCmd { cmd := NewStringSliceCmd(ctx, "smembers", key) _ = c(ctx, cmd) return cmd } // SMembersMap Redis `SMEMBERS key` command output as a map. func (c cmdable) SMembersMap(ctx context.Context, key string) *StringStructMapCmd { cmd := NewStringStructMapCmd(ctx, "smembers", key) _ = c(ctx, cmd) return cmd } func (c cmdable) SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd { cmd := NewBoolCmd(ctx, "smove", source, destination, member) _ = c(ctx, cmd) return cmd } // SPop Redis `SPOP key` command. func (c cmdable) SPop(ctx context.Context, key string) *StringCmd { cmd := NewStringCmd(ctx, "spop", key) _ = c(ctx, cmd) return cmd } // SPopN Redis `SPOP key count` command. func (c cmdable) SPopN(ctx context.Context, key string, count int64) *StringSliceCmd { cmd := NewStringSliceCmd(ctx, "spop", key, count) _ = c(ctx, cmd) return cmd } // SRandMember Redis `SRANDMEMBER key` command. func (c cmdable) SRandMember(ctx context.Context, key string) *StringCmd { cmd := NewStringCmd(ctx, "srandmember", key) _ = c(ctx, cmd) return cmd } // SRandMemberN Redis `SRANDMEMBER key count` command. func (c cmdable) SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd { cmd := NewStringSliceCmd(ctx, "srandmember", key, count) _ = c(ctx, cmd) return cmd } func (c cmdable) SRem(ctx context.Context, key string, members ...interface{}) *IntCmd { args := make([]interface{}, 2, 2+len(members)) args[0] = "srem" args[1] = key args = appendArgs(args, members) cmd := NewIntCmd(ctx, args...) _ = c(ctx, cmd) return cmd } func (c cmdable) SUnion(ctx context.Context, keys ...string) *StringSliceCmd { args := make([]interface{}, 1+len(keys)) args[0] = "sunion" for i, key := range keys { args[1+i] = key } cmd := NewStringSliceCmd(ctx, args...) _ = c(ctx, cmd) return cmd } func (c cmdable) SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd { args := make([]interface{}, 2+len(keys)) args[0] = "sunionstore" args[1] = destination for i, key := range keys { args[2+i] = key } cmd := NewIntCmd(ctx, args...) 
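// Example: a caller-side sketch of SAdd followed by SMembersMap (illustrative
// only; the key and members are made up, and rdb/ctx are assumed to exist):
//
//	if err := rdb.SAdd(ctx, "langs", "go", "rust", "ocaml").Err(); err != nil {
//		// handle error
//	}
//	members, err := rdb.SMembersMap(ctx, "langs").Result() // map[string]struct{}
//	_, _ = members, err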
_ = c(ctx, cmd) return cmd } func (c cmdable) SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd { args := []interface{}{"sscan", key, cursor} if match != "" { args = append(args, "match", match) } if count > 0 { args = append(args, "count", count) } cmd := NewScanCmd(ctx, c, args...) _ = c(ctx, cmd) return cmd } dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.10.0/osscluster.go0000644000000000000000000013252315024302467024460 0ustar rootrootpackage redis import ( "context" "crypto/tls" "fmt" "math" "net" "net/url" "runtime" "sort" "strings" "sync" "sync/atomic" "time" "github.com/redis/go-redis/v9/auth" "github.com/redis/go-redis/v9/internal" "github.com/redis/go-redis/v9/internal/hashtag" "github.com/redis/go-redis/v9/internal/pool" "github.com/redis/go-redis/v9/internal/proto" "github.com/redis/go-redis/v9/internal/rand" ) const ( minLatencyMeasurementInterval = 10 * time.Second ) var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes") // ClusterOptions are used to configure a cluster client and should be // passed to NewClusterClient. type ClusterOptions struct { // A seed list of host:port addresses of cluster nodes. Addrs []string // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn. ClientName string // NewClient creates a cluster node client with provided name and options. NewClient func(opt *Options) *Client // The maximum number of retries before giving up. Command is retried // on network errors and MOVED/ASK redirects. // Default is 3 retries. MaxRedirects int // Enables read-only commands on slave nodes. ReadOnly bool // Allows routing read-only commands to the closest master or slave node. // It automatically enables ReadOnly. RouteByLatency bool // Allows routing read-only commands to the random master or slave node. // It automatically enables ReadOnly. RouteRandomly bool // Optional function that returns cluster slots information. // It is useful to manually create cluster of standalone Redis servers // and load-balance read/write operations between master and slaves. // It can use service like ZooKeeper to maintain configuration information // and Cluster.ReloadState to manually trigger state reloading. ClusterSlots func(context.Context) ([]ClusterSlot, error) // Following options are copied from Options struct. Dialer func(ctx context.Context, network, addr string) (net.Conn, error) OnConnect func(ctx context.Context, cn *Conn) error Protocol int Username string Password string CredentialsProvider func() (username string, password string) CredentialsProviderContext func(ctx context.Context) (username string, password string, err error) StreamingCredentialsProvider auth.StreamingCredentialsProvider MaxRetries int MinRetryBackoff time.Duration MaxRetryBackoff time.Duration DialTimeout time.Duration ReadTimeout time.Duration WriteTimeout time.Duration ContextTimeoutEnabled bool PoolFIFO bool PoolSize int // applies per cluster node and not for the whole cluster PoolTimeout time.Duration MinIdleConns int MaxIdleConns int MaxActiveConns int // applies per cluster node and not for the whole cluster ConnMaxIdleTime time.Duration ConnMaxLifetime time.Duration TLSConfig *tls.Config // DisableIndentity - Disable set-lib on connect. // // default: false // // Deprecated: Use DisableIdentity instead. DisableIndentity bool // DisableIdentity is used to disable CLIENT SETINFO command on connect. // // default: false DisableIdentity bool IdentitySuffix string // Add suffix to client name. 
Default is empty. // UnstableResp3 enables Unstable mode for Redis Search module with RESP3. UnstableResp3 bool } func (opt *ClusterOptions) init() { switch opt.MaxRedirects { case -1: opt.MaxRedirects = 0 case 0: opt.MaxRedirects = 3 } if opt.RouteByLatency || opt.RouteRandomly { opt.ReadOnly = true } if opt.PoolSize == 0 { opt.PoolSize = 5 * runtime.GOMAXPROCS(0) } switch opt.ReadTimeout { case -1: opt.ReadTimeout = 0 case 0: opt.ReadTimeout = 3 * time.Second } switch opt.WriteTimeout { case -1: opt.WriteTimeout = 0 case 0: opt.WriteTimeout = opt.ReadTimeout } if opt.MaxRetries == 0 { opt.MaxRetries = -1 } switch opt.MinRetryBackoff { case -1: opt.MinRetryBackoff = 0 case 0: opt.MinRetryBackoff = 8 * time.Millisecond } switch opt.MaxRetryBackoff { case -1: opt.MaxRetryBackoff = 0 case 0: opt.MaxRetryBackoff = 512 * time.Millisecond } if opt.NewClient == nil { opt.NewClient = NewClient } } // ParseClusterURL parses a URL into ClusterOptions that can be used to connect to Redis. // The URL must be in the form: // // redis://<user>:<password>@<host>:<port> // or // rediss://<user>:<password>@<host>:<port> // // To add additional addresses, specify the query parameter, "addr" one or more times. e.g: // // redis://<user>:<password>@<host>:<port>?addr=<host2>:<port2>&addr=<host3>:<port3> // or // rediss://<user>:<password>@<host>:<port>?addr=<host2>:<port2>&addr=<host3>:<port3> // // Most Option fields can be set using query parameters, with the following restrictions: // - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries // - only scalar type fields are supported (bool, int, time.Duration) // - for time.Duration fields, values must be a valid input for time.ParseDuration(); // additionally a plain integer as value (i.e. without unit) is interpreted as seconds // - to disable a duration field, use value less than or equal to 0; to use the default // value, leave the value blank or remove the parameter // - only the last value is interpreted if a parameter is given multiple times // - fields "network", "addr", "username" and "password" can only be set using other // URL attributes (scheme, host, userinfo, resp.), query parameters using these // names will be treated as unknown parameters // - unknown parameter names will result in an error // // Example: // // redis://user:password@localhost:6789?dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791 // is equivalent to: // &ClusterOptions{ // Addrs: ["localhost:6789", "localhost:6790", "localhost:6791"] // DialTimeout: 3 * time.Second, // no time unit = seconds // ReadTimeout: 6 * time.Second, // } func ParseClusterURL(redisURL string) (*ClusterOptions, error) { o := &ClusterOptions{} u, err := url.Parse(redisURL) if err != nil { return nil, err } // add base URL to the array of addresses // more addresses may be added through the URL params h, p := getHostPortWithDefaults(u) o.Addrs = append(o.Addrs, net.JoinHostPort(h, p)) // setup username, password, and other configurations o, err = setupClusterConn(u, h, o) if err != nil { return nil, err } return o, nil } // setupClusterConn gets the username and password from the URL and the query parameters.
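// Example for ParseClusterURL above: a caller-side sketch (illustrative only;
// the URL, hosts and ports are made up):
//
//	opt, err := redis.ParseClusterURL(
//		"redis://user:password@localhost:6789?dial_timeout=3&read_timeout=6s&addr=localhost:6790")
//	if err != nil {
//		// handle error
//	}
//	// opt.Addrs == []string{"localhost:6789", "localhost:6790"},
//	// opt.DialTimeout == 3*time.Second (plain integers are read as seconds),
//	// opt.ReadTimeout == 6*time.Second.
//	rdb := redis.NewClusterClient(opt)
//	_ = rdb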
func setupClusterConn(u *url.URL, host string, o *ClusterOptions) (*ClusterOptions, error) { switch u.Scheme { case "rediss": o.TLSConfig = &tls.Config{ServerName: host} fallthrough case "redis": o.Username, o.Password = getUserPassword(u) default: return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme) } // retrieve the configuration from the query parameters o, err := setupClusterQueryParams(u, o) if err != nil { return nil, err } return o, nil } // setupClusterQueryParams converts query parameters in u to option value in o. func setupClusterQueryParams(u *url.URL, o *ClusterOptions) (*ClusterOptions, error) { q := queryOptions{q: u.Query()} o.Protocol = q.int("protocol") o.ClientName = q.string("client_name") o.MaxRedirects = q.int("max_redirects") o.ReadOnly = q.bool("read_only") o.RouteByLatency = q.bool("route_by_latency") o.RouteRandomly = q.bool("route_randomly") o.MaxRetries = q.int("max_retries") o.MinRetryBackoff = q.duration("min_retry_backoff") o.MaxRetryBackoff = q.duration("max_retry_backoff") o.DialTimeout = q.duration("dial_timeout") o.ReadTimeout = q.duration("read_timeout") o.WriteTimeout = q.duration("write_timeout") o.PoolFIFO = q.bool("pool_fifo") o.PoolSize = q.int("pool_size") o.MinIdleConns = q.int("min_idle_conns") o.MaxIdleConns = q.int("max_idle_conns") o.MaxActiveConns = q.int("max_active_conns") o.PoolTimeout = q.duration("pool_timeout") o.ConnMaxLifetime = q.duration("conn_max_lifetime") o.ConnMaxIdleTime = q.duration("conn_max_idle_time") if q.err != nil { return nil, q.err } // addr can be specified as many times as needed addrs := q.strings("addr") for _, addr := range addrs { h, p, err := net.SplitHostPort(addr) if err != nil || h == "" || p == "" { return nil, fmt.Errorf("redis: unable to parse addr param: %s", addr) } o.Addrs = append(o.Addrs, net.JoinHostPort(h, p)) } // any parameters left? if r := q.remaining(); len(r) > 0 { return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", ")) } return o, nil } func (opt *ClusterOptions) clientOptions() *Options { return &Options{ ClientName: opt.ClientName, Dialer: opt.Dialer, OnConnect: opt.OnConnect, Protocol: opt.Protocol, Username: opt.Username, Password: opt.Password, CredentialsProvider: opt.CredentialsProvider, CredentialsProviderContext: opt.CredentialsProviderContext, StreamingCredentialsProvider: opt.StreamingCredentialsProvider, MaxRetries: opt.MaxRetries, MinRetryBackoff: opt.MinRetryBackoff, MaxRetryBackoff: opt.MaxRetryBackoff, DialTimeout: opt.DialTimeout, ReadTimeout: opt.ReadTimeout, WriteTimeout: opt.WriteTimeout, ContextTimeoutEnabled: opt.ContextTimeoutEnabled, PoolFIFO: opt.PoolFIFO, PoolSize: opt.PoolSize, PoolTimeout: opt.PoolTimeout, MinIdleConns: opt.MinIdleConns, MaxIdleConns: opt.MaxIdleConns, MaxActiveConns: opt.MaxActiveConns, ConnMaxIdleTime: opt.ConnMaxIdleTime, ConnMaxLifetime: opt.ConnMaxLifetime, DisableIdentity: opt.DisableIdentity, DisableIndentity: opt.DisableIdentity, IdentitySuffix: opt.IdentitySuffix, TLSConfig: opt.TLSConfig, // If ClusterSlots is populated, then we probably have an artificial // cluster whose nodes are not in clustering mode (otherwise there isn't // much use for ClusterSlots config). This means we cannot execute the // READONLY command against that node -- setting readOnly to false in such // situations in the options below will prevent that from happening. 
readOnly: opt.ReadOnly && opt.ClusterSlots == nil, UnstableResp3: opt.UnstableResp3, } } //------------------------------------------------------------------------------ type clusterNode struct { Client *Client latency uint32 // atomic generation uint32 // atomic failing uint32 // atomic // last time the latency measurement was performed for the node, stored in nanoseconds // from epoch lastLatencyMeasurement int64 // atomic } func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode { opt := clOpt.clientOptions() opt.Addr = addr node := clusterNode{ Client: clOpt.NewClient(opt), } node.latency = math.MaxUint32 if clOpt.RouteByLatency { go node.updateLatency() } return &node } func (n *clusterNode) String() string { return n.Client.String() } func (n *clusterNode) Close() error { return n.Client.Close() } const maximumNodeLatency = 1 * time.Minute func (n *clusterNode) updateLatency() { const numProbe = 10 var dur uint64 successes := 0 for i := 0; i < numProbe; i++ { time.Sleep(time.Duration(10+rand.Intn(10)) * time.Millisecond) start := time.Now() err := n.Client.Ping(context.TODO()).Err() if err == nil { dur += uint64(time.Since(start) / time.Microsecond) successes++ } } var latency float64 if successes == 0 { // If none of the pings worked, set latency to some arbitrarily high value so this node gets // least priority. latency = float64((maximumNodeLatency) / time.Microsecond) } else { latency = float64(dur) / float64(successes) } atomic.StoreUint32(&n.latency, uint32(latency+0.5)) n.SetLastLatencyMeasurement(time.Now()) } func (n *clusterNode) Latency() time.Duration { latency := atomic.LoadUint32(&n.latency) return time.Duration(latency) * time.Microsecond } func (n *clusterNode) MarkAsFailing() { atomic.StoreUint32(&n.failing, uint32(time.Now().Unix())) } func (n *clusterNode) Failing() bool { const timeout = 15 // 15 seconds failing := atomic.LoadUint32(&n.failing) if failing == 0 { return false } if time.Now().Unix()-int64(failing) < timeout { return true } atomic.StoreUint32(&n.failing, 0) return false } func (n *clusterNode) Generation() uint32 { return atomic.LoadUint32(&n.generation) } func (n *clusterNode) LastLatencyMeasurement() int64 { return atomic.LoadInt64(&n.lastLatencyMeasurement) } func (n *clusterNode) SetGeneration(gen uint32) { for { v := atomic.LoadUint32(&n.generation) if gen < v || atomic.CompareAndSwapUint32(&n.generation, v, gen) { break } } } func (n *clusterNode) SetLastLatencyMeasurement(t time.Time) { for { v := atomic.LoadInt64(&n.lastLatencyMeasurement) if t.UnixNano() < v || atomic.CompareAndSwapInt64(&n.lastLatencyMeasurement, v, t.UnixNano()) { break } } } func (n *clusterNode) Loading() bool { ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancel() err := n.Client.Ping(ctx).Err() return err != nil && isLoadingError(err) } //------------------------------------------------------------------------------ type clusterNodes struct { opt *ClusterOptions mu sync.RWMutex addrs []string nodes map[string]*clusterNode activeAddrs []string closed bool onNewNode []func(rdb *Client) _generation uint32 // atomic } func newClusterNodes(opt *ClusterOptions) *clusterNodes { return &clusterNodes{ opt: opt, addrs: opt.Addrs, nodes: make(map[string]*clusterNode), } } func (c *clusterNodes) Close() error { c.mu.Lock() defer c.mu.Unlock() if c.closed { return nil } c.closed = true var firstErr error for _, node := range c.nodes { if err := node.Client.Close(); err != nil && firstErr == nil { firstErr = err } } c.nodes = nil 
c.activeAddrs = nil return firstErr } func (c *clusterNodes) OnNewNode(fn func(rdb *Client)) { c.mu.Lock() c.onNewNode = append(c.onNewNode, fn) c.mu.Unlock() } func (c *clusterNodes) Addrs() ([]string, error) { var addrs []string c.mu.RLock() closed := c.closed //nolint:ifshort if !closed { if len(c.activeAddrs) > 0 { addrs = make([]string, len(c.activeAddrs)) copy(addrs, c.activeAddrs) } else { addrs = make([]string, len(c.addrs)) copy(addrs, c.addrs) } } c.mu.RUnlock() if closed { return nil, pool.ErrClosed } if len(addrs) == 0 { return nil, errClusterNoNodes } return addrs, nil } func (c *clusterNodes) NextGeneration() uint32 { return atomic.AddUint32(&c._generation, 1) } // GC removes unused nodes. func (c *clusterNodes) GC(generation uint32) { //nolint:prealloc var collected []*clusterNode c.mu.Lock() c.activeAddrs = c.activeAddrs[:0] now := time.Now() for addr, node := range c.nodes { if node.Generation() >= generation { c.activeAddrs = append(c.activeAddrs, addr) if c.opt.RouteByLatency && node.LastLatencyMeasurement() < now.Add(-minLatencyMeasurementInterval).UnixNano() { go node.updateLatency() } continue } delete(c.nodes, addr) collected = append(collected, node) } c.mu.Unlock() for _, node := range collected { _ = node.Client.Close() } } func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) { node, err := c.get(addr) if err != nil { return nil, err } if node != nil { return node, nil } c.mu.Lock() defer c.mu.Unlock() if c.closed { return nil, pool.ErrClosed } node, ok := c.nodes[addr] if ok { return node, nil } node = newClusterNode(c.opt, addr) for _, fn := range c.onNewNode { fn(node.Client) } c.addrs = appendIfNotExists(c.addrs, addr) c.nodes[addr] = node return node, nil } func (c *clusterNodes) get(addr string) (*clusterNode, error) { var node *clusterNode var err error c.mu.RLock() if c.closed { err = pool.ErrClosed } else { node = c.nodes[addr] } c.mu.RUnlock() return node, err } func (c *clusterNodes) All() ([]*clusterNode, error) { c.mu.RLock() defer c.mu.RUnlock() if c.closed { return nil, pool.ErrClosed } cp := make([]*clusterNode, 0, len(c.nodes)) for _, node := range c.nodes { cp = append(cp, node) } return cp, nil } func (c *clusterNodes) Random() (*clusterNode, error) { addrs, err := c.Addrs() if err != nil { return nil, err } n := rand.Intn(len(addrs)) return c.GetOrCreate(addrs[n]) } //------------------------------------------------------------------------------ type clusterSlot struct { start, end int nodes []*clusterNode } type clusterSlotSlice []*clusterSlot func (p clusterSlotSlice) Len() int { return len(p) } func (p clusterSlotSlice) Less(i, j int) bool { return p[i].start < p[j].start } func (p clusterSlotSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } type clusterState struct { nodes *clusterNodes Masters []*clusterNode Slaves []*clusterNode slots []*clusterSlot generation uint32 createdAt time.Time } func newClusterState( nodes *clusterNodes, slots []ClusterSlot, origin string, ) (*clusterState, error) { c := clusterState{ nodes: nodes, slots: make([]*clusterSlot, 0, len(slots)), generation: nodes.NextGeneration(), createdAt: time.Now(), } originHost, _, _ := net.SplitHostPort(origin) isLoopbackOrigin := isLoopback(originHost) for _, slot := range slots { var nodes []*clusterNode for i, slotNode := range slot.Nodes { addr := slotNode.Addr if !isLoopbackOrigin { addr = replaceLoopbackHost(addr, originHost) } node, err := c.nodes.GetOrCreate(addr) if err != nil { return nil, err } node.SetGeneration(c.generation) nodes = append(nodes, 
node) if i == 0 { c.Masters = appendUniqueNode(c.Masters, node) } else { c.Slaves = appendUniqueNode(c.Slaves, node) } } c.slots = append(c.slots, &clusterSlot{ start: slot.Start, end: slot.End, nodes: nodes, }) } sort.Sort(clusterSlotSlice(c.slots)) time.AfterFunc(time.Minute, func() { nodes.GC(c.generation) }) return &c, nil } func replaceLoopbackHost(nodeAddr, originHost string) string { nodeHost, nodePort, err := net.SplitHostPort(nodeAddr) if err != nil { return nodeAddr } nodeIP := net.ParseIP(nodeHost) if nodeIP == nil { return nodeAddr } if !nodeIP.IsLoopback() { return nodeAddr } // Use origin host which is not loopback and node port. return net.JoinHostPort(originHost, nodePort) } func isLoopback(host string) bool { ip := net.ParseIP(host) if ip == nil { return true } return ip.IsLoopback() } func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) { nodes := c.slotNodes(slot) if len(nodes) > 0 { return nodes[0], nil } return c.nodes.Random() } func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) { nodes := c.slotNodes(slot) switch len(nodes) { case 0: return c.nodes.Random() case 1: return nodes[0], nil case 2: slave := nodes[1] if !slave.Failing() && !slave.Loading() { return slave, nil } return nodes[0], nil default: var slave *clusterNode for i := 0; i < 10; i++ { n := rand.Intn(len(nodes)-1) + 1 slave = nodes[n] if !slave.Failing() && !slave.Loading() { return slave, nil } } // All slaves are loading - use master. return nodes[0], nil } } func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) { nodes := c.slotNodes(slot) if len(nodes) == 0 { return c.nodes.Random() } var allNodesFailing = true var ( closestNonFailingNode *clusterNode closestNode *clusterNode minLatency time.Duration ) // setting the max possible duration as zerovalue for minlatency minLatency = time.Duration(math.MaxInt64) for _, n := range nodes { if closestNode == nil || n.Latency() < minLatency { closestNode = n minLatency = n.Latency() if !n.Failing() { closestNonFailingNode = n allNodesFailing = false } } } // pick the healthly node with the lowest latency if !allNodesFailing && closestNonFailingNode != nil { return closestNonFailingNode, nil } // if all nodes are failing, we will pick the temporarily failing node with lowest latency if minLatency < maximumNodeLatency && closestNode != nil { internal.Logger.Printf(context.TODO(), "redis: all nodes are marked as failed, picking the temporarily failing node with lowest latency") return closestNode, nil } // If all nodes are having the maximum latency(all pings are failing) - return a random node across the cluster internal.Logger.Printf(context.TODO(), "redis: pings to all nodes are failing, picking a random node across the cluster") return c.nodes.Random() } func (c *clusterState) slotRandomNode(slot int) (*clusterNode, error) { nodes := c.slotNodes(slot) if len(nodes) == 0 { return c.nodes.Random() } if len(nodes) == 1 { return nodes[0], nil } randomNodes := rand.Perm(len(nodes)) for _, idx := range randomNodes { if node := nodes[idx]; !node.Failing() { return node, nil } } return nodes[randomNodes[0]], nil } func (c *clusterState) slotNodes(slot int) []*clusterNode { i := sort.Search(len(c.slots), func(i int) bool { return c.slots[i].end >= slot }) if i >= len(c.slots) { return nil } x := c.slots[i] if slot >= x.start && slot <= x.end { return x.nodes } return nil } //------------------------------------------------------------------------------ type clusterStateHolder struct { load func(ctx context.Context) 
(*clusterState, error) state atomic.Value reloading uint32 // atomic } func newClusterStateHolder(fn func(ctx context.Context) (*clusterState, error)) *clusterStateHolder { return &clusterStateHolder{ load: fn, } } func (c *clusterStateHolder) Reload(ctx context.Context) (*clusterState, error) { state, err := c.load(ctx) if err != nil { return nil, err } c.state.Store(state) return state, nil } func (c *clusterStateHolder) LazyReload() { if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) { return } go func() { defer atomic.StoreUint32(&c.reloading, 0) _, err := c.Reload(context.Background()) if err != nil { return } time.Sleep(200 * time.Millisecond) }() } func (c *clusterStateHolder) Get(ctx context.Context) (*clusterState, error) { v := c.state.Load() if v == nil { return c.Reload(ctx) } state := v.(*clusterState) if time.Since(state.createdAt) > 10*time.Second { c.LazyReload() } return state, nil } func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, error) { state, err := c.Reload(ctx) if err == nil { return state, nil } return c.Get(ctx) } //------------------------------------------------------------------------------ // ClusterClient is a Redis Cluster client representing a pool of zero // or more underlying connections. It's safe for concurrent use by // multiple goroutines. type ClusterClient struct { opt *ClusterOptions nodes *clusterNodes state *clusterStateHolder cmdsInfoCache *cmdsInfoCache cmdable hooksMixin } // NewClusterClient returns a Redis Cluster client as described in // http://redis.io/topics/cluster-spec. func NewClusterClient(opt *ClusterOptions) *ClusterClient { if opt == nil { panic("redis: NewClusterClient nil options") } opt.init() c := &ClusterClient{ opt: opt, nodes: newClusterNodes(opt), } c.state = newClusterStateHolder(c.loadState) c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo) c.cmdable = c.Process c.initHooks(hooks{ dial: nil, process: c.process, pipeline: c.processPipeline, txPipeline: c.processTxPipeline, }) return c } // Options returns read-only Options that were used to create the client. func (c *ClusterClient) Options() *ClusterOptions { return c.opt } // ReloadState reloads cluster state. If available it calls ClusterSlots func // to get cluster slots information. func (c *ClusterClient) ReloadState(ctx context.Context) { c.state.LazyReload() } // Close closes the cluster client, releasing any open resources. // // It is rare to Close a ClusterClient, as the ClusterClient is meant // to be long-lived and shared between many goroutines. func (c *ClusterClient) Close() error { return c.nodes.Close() } // Do create a Cmd from the args and processes the cmd. func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd { cmd := NewCmd(ctx, args...) _ = c.Process(ctx, cmd) return cmd } func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error { err := c.processHook(ctx, cmd) cmd.SetErr(err) return err } func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error { slot := c.cmdSlot(cmd) var node *clusterNode var moved bool var ask bool var lastErr error for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { // MOVED and ASK responses are not transient errors that require retry delay; they // should be attempted immediately. 
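// For reference, a caller-side sketch of NewClusterClient and ClusterOptions
// defined above (illustrative only; the seed addresses are made up):
//
//	rdb := redis.NewClusterClient(&redis.ClusterOptions{
//		Addrs:          []string{":7000", ":7001", ":7002"},
//		RouteByLatency: true, // also enables ReadOnly
//	})
//	defer rdb.Close()
//	if err := rdb.Ping(context.Background()).Err(); err != nil {
//		// handle error
//	}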
if attempt > 0 && !moved && !ask { if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { return err } } if node == nil { var err error node, err = c.cmdNode(ctx, cmd.Name(), slot) if err != nil { return err } } if ask { ask = false pipe := node.Client.Pipeline() _ = pipe.Process(ctx, NewCmd(ctx, "asking")) _ = pipe.Process(ctx, cmd) _, lastErr = pipe.Exec(ctx) } else { lastErr = node.Client.Process(ctx, cmd) } // If there is no error - we are done. if lastErr == nil { return nil } if isReadOnly := isReadOnlyError(lastErr); isReadOnly || lastErr == pool.ErrClosed { if isReadOnly { c.state.LazyReload() } node = nil continue } // If slave is loading - pick another node. if c.opt.ReadOnly && isLoadingError(lastErr) { node.MarkAsFailing() node = nil continue } var addr string moved, ask, addr = isMovedError(lastErr) if moved || ask { c.state.LazyReload() var err error node, err = c.nodes.GetOrCreate(addr) if err != nil { return err } continue } if shouldRetry(lastErr, cmd.readTimeout() == nil) { // First retry the same node. if attempt == 0 { continue } // Second try another node. node.MarkAsFailing() node = nil continue } return lastErr } return lastErr } func (c *ClusterClient) OnNewNode(fn func(rdb *Client)) { c.nodes.OnNewNode(fn) } // ForEachMaster concurrently calls the fn on each master node in the cluster. // It returns the first error if any. func (c *ClusterClient) ForEachMaster( ctx context.Context, fn func(ctx context.Context, client *Client) error, ) error { state, err := c.state.ReloadOrGet(ctx) if err != nil { return err } var wg sync.WaitGroup errCh := make(chan error, 1) for _, master := range state.Masters { wg.Add(1) go func(node *clusterNode) { defer wg.Done() err := fn(ctx, node.Client) if err != nil { select { case errCh <- err: default: } } }(master) } wg.Wait() select { case err := <-errCh: return err default: return nil } } // ForEachSlave concurrently calls the fn on each slave node in the cluster. // It returns the first error if any. func (c *ClusterClient) ForEachSlave( ctx context.Context, fn func(ctx context.Context, client *Client) error, ) error { state, err := c.state.ReloadOrGet(ctx) if err != nil { return err } var wg sync.WaitGroup errCh := make(chan error, 1) for _, slave := range state.Slaves { wg.Add(1) go func(node *clusterNode) { defer wg.Done() err := fn(ctx, node.Client) if err != nil { select { case errCh <- err: default: } } }(slave) } wg.Wait() select { case err := <-errCh: return err default: return nil } } // ForEachShard concurrently calls the fn on each known node in the cluster. // It returns the first error if any. func (c *ClusterClient) ForEachShard( ctx context.Context, fn func(ctx context.Context, client *Client) error, ) error { state, err := c.state.ReloadOrGet(ctx) if err != nil { return err } var wg sync.WaitGroup errCh := make(chan error, 1) worker := func(node *clusterNode) { defer wg.Done() err := fn(ctx, node.Client) if err != nil { select { case errCh <- err: default: } } } for _, node := range state.Masters { wg.Add(1) go worker(node) } for _, node := range state.Slaves { wg.Add(1) go worker(node) } wg.Wait() select { case err := <-errCh: return err default: return nil } } // PoolStats returns accumulated connection pool stats. 
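// Example for ForEachShard above: a caller-side sketch that pings every known
// node concurrently (illustrative only; rdb is assumed to be a
// *redis.ClusterClient and ctx a context.Context):
//
//	err := rdb.ForEachShard(ctx, func(ctx context.Context, shard *redis.Client) error {
//		return shard.Ping(ctx).Err()
//	})
//	if err != nil {
//		// first error returned by any shard
//	}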
func (c *ClusterClient) PoolStats() *PoolStats { var acc PoolStats state, _ := c.state.Get(context.TODO()) if state == nil { return &acc } for _, node := range state.Masters { s := node.Client.connPool.Stats() acc.Hits += s.Hits acc.Misses += s.Misses acc.Timeouts += s.Timeouts acc.TotalConns += s.TotalConns acc.IdleConns += s.IdleConns acc.StaleConns += s.StaleConns } for _, node := range state.Slaves { s := node.Client.connPool.Stats() acc.Hits += s.Hits acc.Misses += s.Misses acc.Timeouts += s.Timeouts acc.TotalConns += s.TotalConns acc.IdleConns += s.IdleConns acc.StaleConns += s.StaleConns } return &acc } func (c *ClusterClient) loadState(ctx context.Context) (*clusterState, error) { if c.opt.ClusterSlots != nil { slots, err := c.opt.ClusterSlots(ctx) if err != nil { return nil, err } return newClusterState(c.nodes, slots, "") } addrs, err := c.nodes.Addrs() if err != nil { return nil, err } var firstErr error for _, idx := range rand.Perm(len(addrs)) { addr := addrs[idx] node, err := c.nodes.GetOrCreate(addr) if err != nil { if firstErr == nil { firstErr = err } continue } slots, err := node.Client.ClusterSlots(ctx).Result() if err != nil { if firstErr == nil { firstErr = err } continue } return newClusterState(c.nodes, slots, node.Client.opt.Addr) } /* * No node is connectable. It's possible that all nodes' IP has changed. * Clear activeAddrs to let client be able to re-connect using the initial * setting of the addresses (e.g. [redis-cluster-0:6379, redis-cluster-1:6379]), * which might have chance to resolve domain name and get updated IP address. */ c.nodes.mu.Lock() c.nodes.activeAddrs = nil c.nodes.mu.Unlock() return nil, firstErr } func (c *ClusterClient) Pipeline() Pipeliner { pipe := Pipeline{ exec: pipelineExecer(c.processPipelineHook), } pipe.init() return &pipe } func (c *ClusterClient) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { return c.Pipeline().Pipelined(ctx, fn) } func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error { cmdsMap := newCmdsMap() if err := c.mapCmdsByNode(ctx, cmdsMap, cmds); err != nil { setCmdsErr(cmds, err) return err } for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { if attempt > 0 { if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { setCmdsErr(cmds, err) return err } } failedCmds := newCmdsMap() var wg sync.WaitGroup for node, cmds := range cmdsMap.m { wg.Add(1) go func(node *clusterNode, cmds []Cmder) { defer wg.Done() c.processPipelineNode(ctx, node, cmds, failedCmds) }(node, cmds) } wg.Wait() if len(failedCmds.m) == 0 { break } cmdsMap = failedCmds } return cmdsFirstErr(cmds) } func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmds []Cmder) error { state, err := c.state.Get(ctx) if err != nil { return err } if c.opt.ReadOnly && c.cmdsAreReadOnly(ctx, cmds) { for _, cmd := range cmds { slot := c.cmdSlot(cmd) node, err := c.slotReadOnlyNode(state, slot) if err != nil { return err } cmdsMap.Add(node, cmd) } return nil } for _, cmd := range cmds { slot := c.cmdSlot(cmd) node, err := state.slotMasterNode(slot) if err != nil { return err } cmdsMap.Add(node, cmd) } return nil } func (c *ClusterClient) cmdsAreReadOnly(ctx context.Context, cmds []Cmder) bool { for _, cmd := range cmds { cmdInfo := c.cmdInfo(ctx, cmd.Name()) if cmdInfo == nil || !cmdInfo.ReadOnly { return false } } return true } func (c *ClusterClient) processPipelineNode( ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap, ) { _ = 
node.Client.withProcessPipelineHook(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { cn, err := node.Client.getConn(ctx) if err != nil { if !isContextError(err) { node.MarkAsFailing() } _ = c.mapCmdsByNode(ctx, failedCmds, cmds) setCmdsErr(cmds, err) return err } var processErr error defer func() { node.Client.releaseConn(ctx, cn, processErr) }() processErr = c.processPipelineNodeConn(ctx, node, cn, cmds, failedCmds) return processErr }) } func (c *ClusterClient) processPipelineNodeConn( ctx context.Context, node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap, ) error { if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error { return writeCmds(wr, cmds) }); err != nil { if isBadConn(err, false, node.Client.getAddr()) { node.MarkAsFailing() } if shouldRetry(err, true) { _ = c.mapCmdsByNode(ctx, failedCmds, cmds) } setCmdsErr(cmds, err) return err } return cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error { return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds) }) } func (c *ClusterClient) pipelineReadCmds( ctx context.Context, node *clusterNode, rd *proto.Reader, cmds []Cmder, failedCmds *cmdsMap, ) error { for i, cmd := range cmds { err := cmd.readReply(rd) cmd.SetErr(err) if err == nil { continue } if c.checkMovedErr(ctx, cmd, err, failedCmds) { continue } if c.opt.ReadOnly && isBadConn(err, false, node.Client.getAddr()) { node.MarkAsFailing() } if !isRedisError(err) { if shouldRetry(err, true) { _ = c.mapCmdsByNode(ctx, failedCmds, cmds) } setCmdsErr(cmds[i+1:], err) return err } } if err := cmds[0].Err(); err != nil && shouldRetry(err, true) { _ = c.mapCmdsByNode(ctx, failedCmds, cmds) return err } return nil } func (c *ClusterClient) checkMovedErr( ctx context.Context, cmd Cmder, err error, failedCmds *cmdsMap, ) bool { moved, ask, addr := isMovedError(err) if !moved && !ask { return false } node, err := c.nodes.GetOrCreate(addr) if err != nil { return false } if moved { c.state.LazyReload() failedCmds.Add(node, cmd) return true } if ask { failedCmds.Add(node, NewCmd(ctx, "asking"), cmd) return true } panic("not reached") } // TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. func (c *ClusterClient) TxPipeline() Pipeliner { pipe := Pipeline{ exec: func(ctx context.Context, cmds []Cmder) error { cmds = wrapMultiExec(ctx, cmds) return c.processTxPipelineHook(ctx, cmds) }, } pipe.init() return &pipe } func (c *ClusterClient) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { return c.TxPipeline().Pipelined(ctx, fn) } func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error { // Trim multi .. exec. 
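// For reference, a caller-side sketch of TxPipelined above (illustrative only;
// the keys are made up, and rdb/ctx are assumed to exist). The queued commands
// are wrapped with MULTI/EXEC and routed by the slot of their keys, so a hash
// tag keeps them on one node:
//
//	cmds, err := rdb.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
//		pipe.Incr(ctx, "counter:{user1}")
//		pipe.Expire(ctx, "counter:{user1}", time.Hour)
//		return nil
//	})
//	_, _ = cmds, err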
cmds = cmds[1 : len(cmds)-1] state, err := c.state.Get(ctx) if err != nil { setCmdsErr(cmds, err) return err } cmdsMap := c.mapCmdsBySlot(cmds) for slot, cmds := range cmdsMap { node, err := state.slotMasterNode(slot) if err != nil { setCmdsErr(cmds, err) continue } cmdsMap := map[*clusterNode][]Cmder{node: cmds} for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { if attempt > 0 { if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { setCmdsErr(cmds, err) return err } } failedCmds := newCmdsMap() var wg sync.WaitGroup for node, cmds := range cmdsMap { wg.Add(1) go func(node *clusterNode, cmds []Cmder) { defer wg.Done() c.processTxPipelineNode(ctx, node, cmds, failedCmds) }(node, cmds) } wg.Wait() if len(failedCmds.m) == 0 { break } cmdsMap = failedCmds.m } } return cmdsFirstErr(cmds) } func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder { cmdsMap := make(map[int][]Cmder) for _, cmd := range cmds { slot := c.cmdSlot(cmd) cmdsMap[slot] = append(cmdsMap[slot], cmd) } return cmdsMap } func (c *ClusterClient) processTxPipelineNode( ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap, ) { cmds = wrapMultiExec(ctx, cmds) _ = node.Client.withProcessPipelineHook(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { cn, err := node.Client.getConn(ctx) if err != nil { _ = c.mapCmdsByNode(ctx, failedCmds, cmds) setCmdsErr(cmds, err) return err } var processErr error defer func() { node.Client.releaseConn(ctx, cn, processErr) }() processErr = c.processTxPipelineNodeConn(ctx, node, cn, cmds, failedCmds) return processErr }) } func (c *ClusterClient) processTxPipelineNodeConn( ctx context.Context, _ *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap, ) error { if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error { return writeCmds(wr, cmds) }); err != nil { if shouldRetry(err, true) { _ = c.mapCmdsByNode(ctx, failedCmds, cmds) } setCmdsErr(cmds, err) return err } return cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error { statusCmd := cmds[0].(*StatusCmd) // Trim multi and exec. trimmedCmds := cmds[1 : len(cmds)-1] if err := c.txPipelineReadQueued( ctx, rd, statusCmd, trimmedCmds, failedCmds, ); err != nil { setCmdsErr(cmds, err) moved, ask, addr := isMovedError(err) if moved || ask { return c.cmdsMoved(ctx, trimmedCmds, moved, ask, addr, failedCmds) } return err } return pipelineReadCmds(rd, trimmedCmds) }) } func (c *ClusterClient) txPipelineReadQueued( ctx context.Context, rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder, failedCmds *cmdsMap, ) error { // Parse queued replies. if err := statusCmd.readReply(rd); err != nil { return err } for _, cmd := range cmds { err := statusCmd.readReply(rd) if err == nil || c.checkMovedErr(ctx, cmd, err, failedCmds) || isRedisError(err) { continue } return err } // Parse number of replies. 
line, err := rd.ReadLine() if err != nil { if err == Nil { err = TxFailedErr } return err } if line[0] != proto.RespArray { return fmt.Errorf("redis: expected '*', but got line %q", line) } return nil } func (c *ClusterClient) cmdsMoved( ctx context.Context, cmds []Cmder, moved, ask bool, addr string, failedCmds *cmdsMap, ) error { node, err := c.nodes.GetOrCreate(addr) if err != nil { return err } if moved { c.state.LazyReload() for _, cmd := range cmds { failedCmds.Add(node, cmd) } return nil } if ask { for _, cmd := range cmds { failedCmds.Add(node, NewCmd(ctx, "asking"), cmd) } return nil } return nil } func (c *ClusterClient) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error { if len(keys) == 0 { return fmt.Errorf("redis: Watch requires at least one key") } slot := hashtag.Slot(keys[0]) for _, key := range keys[1:] { if hashtag.Slot(key) != slot { err := fmt.Errorf("redis: Watch requires all keys to be in the same slot") return err } } node, err := c.slotMasterNode(ctx, slot) if err != nil { return err } for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { if attempt > 0 { if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { return err } } err = node.Client.Watch(ctx, fn, keys...) if err == nil { break } moved, ask, addr := isMovedError(err) if moved || ask { node, err = c.nodes.GetOrCreate(addr) if err != nil { return err } continue } if isReadOnly := isReadOnlyError(err); isReadOnly || err == pool.ErrClosed { if isReadOnly { c.state.LazyReload() } node, err = c.slotMasterNode(ctx, slot) if err != nil { return err } continue } if shouldRetry(err, true) { continue } return err } return err } func (c *ClusterClient) pubSub() *PubSub { var node *clusterNode pubsub := &PubSub{ opt: c.opt.clientOptions(), newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) { if node != nil { panic("node != nil") } var err error if len(channels) > 0 { slot := hashtag.Slot(channels[0]) node, err = c.slotMasterNode(ctx, slot) } else { node, err = c.nodes.Random() } if err != nil { return nil, err } cn, err := node.Client.newConn(context.TODO()) if err != nil { node = nil return nil, err } return cn, nil }, closeConn: func(cn *pool.Conn) error { err := node.Client.connPool.CloseConn(cn) node = nil return err }, } pubsub.init() return pubsub } // Subscribe subscribes the client to the specified channels. // Channels can be omitted to create empty subscription. func (c *ClusterClient) Subscribe(ctx context.Context, channels ...string) *PubSub { pubsub := c.pubSub() if len(channels) > 0 { _ = pubsub.Subscribe(ctx, channels...) } return pubsub } // PSubscribe subscribes the client to the given patterns. // Patterns can be omitted to create empty subscription. func (c *ClusterClient) PSubscribe(ctx context.Context, channels ...string) *PubSub { pubsub := c.pubSub() if len(channels) > 0 { _ = pubsub.PSubscribe(ctx, channels...) } return pubsub } // SSubscribe Subscribes the client to the specified shard channels. func (c *ClusterClient) SSubscribe(ctx context.Context, channels ...string) *PubSub { pubsub := c.pubSub() if len(channels) > 0 { _ = pubsub.SSubscribe(ctx, channels...) } return pubsub } func (c *ClusterClient) retryBackoff(attempt int) time.Duration { return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff) } func (c *ClusterClient) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) { // Try 3 random nodes. 
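// For reference, a caller-side sketch of Watch above, implementing an
// optimistic check-and-set (illustrative only; the key is made up, and rdb/ctx
// are assumed to exist). All watched keys must hash to the same slot:
//
//	err := rdb.Watch(ctx, func(tx *redis.Tx) error {
//		n, err := tx.Get(ctx, "counter").Int()
//		if err != nil && err != redis.Nil {
//			return err
//		}
//		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
//			pipe.Set(ctx, "counter", n+1, 0)
//			return nil
//		})
//		return err
//	}, "counter")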
const nodeLimit = 3 addrs, err := c.nodes.Addrs() if err != nil { return nil, err } var firstErr error perm := rand.Perm(len(addrs)) if len(perm) > nodeLimit { perm = perm[:nodeLimit] } for _, idx := range perm { addr := addrs[idx] node, err := c.nodes.GetOrCreate(addr) if err != nil { if firstErr == nil { firstErr = err } continue } info, err := node.Client.Command(ctx).Result() if err == nil { return info, nil } if firstErr == nil { firstErr = err } } if firstErr == nil { panic("not reached") } return nil, firstErr } func (c *ClusterClient) cmdInfo(ctx context.Context, name string) *CommandInfo { cmdsInfo, err := c.cmdsInfoCache.Get(ctx) if err != nil { internal.Logger.Printf(context.TODO(), "getting command info: %s", err) return nil } info := cmdsInfo[name] if info == nil { internal.Logger.Printf(context.TODO(), "info for cmd=%s not found", name) } return info } func (c *ClusterClient) cmdSlot(cmd Cmder) int { args := cmd.Args() if args[0] == "cluster" && (args[1] == "getkeysinslot" || args[1] == "countkeysinslot") { return args[2].(int) } return cmdSlot(cmd, cmdFirstKeyPos(cmd)) } func cmdSlot(cmd Cmder, pos int) int { if pos == 0 { return hashtag.RandomSlot() } firstKey := cmd.stringArg(pos) return hashtag.Slot(firstKey) } func (c *ClusterClient) cmdNode( ctx context.Context, cmdName string, slot int, ) (*clusterNode, error) { state, err := c.state.Get(ctx) if err != nil { return nil, err } if c.opt.ReadOnly { cmdInfo := c.cmdInfo(ctx, cmdName) if cmdInfo != nil && cmdInfo.ReadOnly { return c.slotReadOnlyNode(state, slot) } } return state.slotMasterNode(slot) } func (c *ClusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) { if c.opt.RouteByLatency { return state.slotClosestNode(slot) } if c.opt.RouteRandomly { return state.slotRandomNode(slot) } return state.slotSlaveNode(slot) } func (c *ClusterClient) slotMasterNode(ctx context.Context, slot int) (*clusterNode, error) { state, err := c.state.Get(ctx) if err != nil { return nil, err } return state.slotMasterNode(slot) } // SlaveForKey gets a client for a replica node to run any command on it. // This is especially useful if we want to run a particular lua script which has // only read only commands on the replica. // This is because other redis commands generally have a flag that points that // they are read only and automatically run on the replica nodes // if ClusterOptions.ReadOnly flag is set to true. func (c *ClusterClient) SlaveForKey(ctx context.Context, key string) (*Client, error) { state, err := c.state.Get(ctx) if err != nil { return nil, err } slot := hashtag.Slot(key) node, err := c.slotReadOnlyNode(state, slot) if err != nil { return nil, err } return node.Client, err } // MasterForKey return a client to the master node for a particular key. 
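// Example for SlaveForKey and MasterForKey: a caller-side sketch that runs a
// read on a replica and a write on the master owning the same key (illustrative
// only; the key is made up, and rdb/ctx are assumed to exist):
//
//	replica, err := rdb.SlaveForKey(ctx, "user:42")
//	if err == nil {
//		_ = replica.Get(ctx, "user:42").Err()
//	}
//	master, err := rdb.MasterForKey(ctx, "user:42")
//	if err == nil {
//		_ = master.Set(ctx, "user:42", "payload", 0).Err()
//	}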
func (c *ClusterClient) MasterForKey(ctx context.Context, key string) (*Client, error) { slot := hashtag.Slot(key) node, err := c.slotMasterNode(ctx, slot) if err != nil { return nil, err } return node.Client, err } func (c *ClusterClient) context(ctx context.Context) context.Context { if c.opt.ContextTimeoutEnabled { return ctx } return context.Background() } func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode { for _, n := range nodes { if n == node { return nodes } } return append(nodes, node) } func appendIfNotExists(ss []string, es ...string) []string { loop: for _, e := range es { for _, s := range ss { if s == e { continue loop } } ss = append(ss, e) } return ss } //------------------------------------------------------------------------------ type cmdsMap struct { mu sync.Mutex m map[*clusterNode][]Cmder } func newCmdsMap() *cmdsMap { return &cmdsMap{ m: make(map[*clusterNode][]Cmder), } } func (m *cmdsMap) Add(node *clusterNode, cmds ...Cmder) { m.mu.Lock() m.m[node] = append(m.m[node], cmds...) m.mu.Unlock() } dependencies/pkg/mod/github.com/redis/go-redis/v9@v9.10.0/commands.go0000644000000000000000000004713615024302467024060 0ustar rootrootpackage redis import ( "context" "encoding" "errors" "fmt" "io" "net" "reflect" "runtime" "strings" "time" "github.com/redis/go-redis/v9/internal" ) // KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, // otherwise you will receive an error: (error) ERR syntax error. // For example: // // rdb.Set(ctx, key, value, redis.KeepTTL) const KeepTTL = -1 func usePrecise(dur time.Duration) bool { return dur < time.Second || dur%time.Second != 0 } func formatMs(ctx context.Context, dur time.Duration) int64 { if dur > 0 && dur < time.Millisecond { internal.Logger.Printf( ctx, "specified duration is %s, but minimal supported value is %s - truncating to 1ms", dur, time.Millisecond, ) return 1 } return int64(dur / time.Millisecond) } func formatSec(ctx context.Context, dur time.Duration) int64 { if dur > 0 && dur < time.Second { internal.Logger.Printf( ctx, "specified duration is %s, but minimal supported value is %s - truncating to 1s", dur, time.Second, ) return 1 } return int64(dur / time.Second) } func appendArgs(dst, src []interface{}) []interface{} { if len(src) == 1 { return appendArg(dst, src[0]) } dst = append(dst, src...) return dst } func appendArg(dst []interface{}, arg interface{}) []interface{} { switch arg := arg.(type) { case []string: for _, s := range arg { dst = append(dst, s) } return dst case []interface{}: dst = append(dst, arg...) return dst case map[string]interface{}: for k, v := range arg { dst = append(dst, k, v) } return dst case map[string]string: for k, v := range arg { dst = append(dst, k, v) } return dst case time.Time, time.Duration, encoding.BinaryMarshaler, net.IP: return append(dst, arg) case nil: return dst default: // scan struct field v := reflect.ValueOf(arg) if v.Type().Kind() == reflect.Ptr { if v.IsNil() { // error: arg is not a valid object return dst } v = v.Elem() } if v.Type().Kind() == reflect.Struct { return appendStructField(dst, v) } return append(dst, arg) } } // appendStructField appends the field and value held by the structure v to dst, and returns the appended dst. 
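// Illustrative sketch (comment only; type and field names are hypothetical):
// commands that accept variadic values, such as HSet, can take a struct whose
// exported fields carry `redis` tags; appendStructField below flattens it into
// field/value pairs:
//
//	type Profile struct {
//		Name     string `redis:"name"`
//		Email    string `redis:"email,omitempty"`
//		Internal string `redis:"-"` // skipped
//	}
//
//	rdb.HSet(ctx, "user:1", Profile{Name: "ada"})
//	// -> HSET user:1 name ada   (email omitted because it is empty)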
func appendStructField(dst []interface{}, v reflect.Value) []interface{} { typ := v.Type() for i := 0; i < typ.NumField(); i++ { tag := typ.Field(i).Tag.Get("redis") if tag == "" || tag == "-" { continue } name, opt, _ := strings.Cut(tag, ",") if name == "" { continue } field := v.Field(i) // miss field if omitEmpty(opt) && isEmptyValue(field) { continue } if field.CanInterface() { dst = append(dst, name, field.Interface()) } } return dst } func omitEmpty(opt string) bool { for opt != "" { var name string name, opt, _ = strings.Cut(opt, ",") if name == "omitempty" { return true } } return false } func isEmptyValue(v reflect.Value) bool { switch v.Kind() { case reflect.Array, reflect.Map, reflect.Slice, reflect.String: return v.Len() == 0 case reflect.Bool: return !v.Bool() case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return v.Int() == 0 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return v.Uint() == 0 case reflect.Float32, reflect.Float64: return v.Float() == 0 case reflect.Interface, reflect.Pointer: return v.IsNil() case reflect.Struct: if v.Type() == reflect.TypeOf(time.Time{}) { return v.IsZero() } // Only supports the struct time.Time, // subsequent iterations will follow the func Scan support decoder. } return false } type Cmdable interface { Pipeline() Pipeliner Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) TxPipeline() Pipeliner Command(ctx context.Context) *CommandsInfoCmd CommandList(ctx context.Context, filter *FilterBy) *StringSliceCmd CommandGetKeys(ctx context.Context, commands ...interface{}) *StringSliceCmd CommandGetKeysAndFlags(ctx context.Context, commands ...interface{}) *KeyFlagsCmd ClientGetName(ctx context.Context) *StringCmd Echo(ctx context.Context, message interface{}) *StringCmd Ping(ctx context.Context) *StatusCmd Quit(ctx context.Context) *StatusCmd Unlink(ctx context.Context, keys ...string) *IntCmd BgRewriteAOF(ctx context.Context) *StatusCmd BgSave(ctx context.Context) *StatusCmd ClientKill(ctx context.Context, ipPort string) *StatusCmd ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd ClientList(ctx context.Context) *StringCmd ClientInfo(ctx context.Context) *ClientInfoCmd ClientPause(ctx context.Context, dur time.Duration) *BoolCmd ClientUnpause(ctx context.Context) *BoolCmd ClientID(ctx context.Context) *IntCmd ClientUnblock(ctx context.Context, id int64) *IntCmd ClientUnblockWithError(ctx context.Context, id int64) *IntCmd ConfigGet(ctx context.Context, parameter string) *MapStringStringCmd ConfigResetStat(ctx context.Context) *StatusCmd ConfigSet(ctx context.Context, parameter, value string) *StatusCmd ConfigRewrite(ctx context.Context) *StatusCmd DBSize(ctx context.Context) *IntCmd FlushAll(ctx context.Context) *StatusCmd FlushAllAsync(ctx context.Context) *StatusCmd FlushDB(ctx context.Context) *StatusCmd FlushDBAsync(ctx context.Context) *StatusCmd Info(ctx context.Context, section ...string) *StringCmd LastSave(ctx context.Context) *IntCmd Save(ctx context.Context) *StatusCmd Shutdown(ctx context.Context) *StatusCmd ShutdownSave(ctx context.Context) *StatusCmd ShutdownNoSave(ctx context.Context) *StatusCmd SlaveOf(ctx context.Context, host, port string) *StatusCmd SlowLogGet(ctx context.Context, num int64) *SlowLogCmd Time(ctx context.Context) *TimeCmd DebugObject(ctx context.Context, key string) *StringCmd MemoryUsage(ctx context.Context, key 
string, samples ...int) *IntCmd ModuleLoadex(ctx context.Context, conf *ModuleLoadexConfig) *StringCmd ACLCmdable BitMapCmdable ClusterCmdable GenericCmdable GeoCmdable HashCmdable HyperLogLogCmdable ListCmdable ProbabilisticCmdable PubSubCmdable ScriptingFunctionsCmdable SearchCmdable SetCmdable SortedSetCmdable StringCmdable StreamCmdable TimeseriesCmdable JSONCmdable VectorSetCmdable } type StatefulCmdable interface { Cmdable Auth(ctx context.Context, password string) *StatusCmd AuthACL(ctx context.Context, username, password string) *StatusCmd Select(ctx context.Context, index int) *StatusCmd SwapDB(ctx context.Context, index1, index2 int) *StatusCmd ClientSetName(ctx context.Context, name string) *BoolCmd ClientSetInfo(ctx context.Context, info LibraryInfo) *StatusCmd Hello(ctx context.Context, ver int, username, password, clientName string) *MapStringInterfaceCmd } var ( _ Cmdable = (*Client)(nil) _ Cmdable = (*Tx)(nil) _ Cmdable = (*Ring)(nil) _ Cmdable = (*ClusterClient)(nil) ) type cmdable func(ctx context.Context, cmd Cmder) error type statefulCmdable func(ctx context.Context, cmd Cmder) error //------------------------------------------------------------------------------ func (c statefulCmdable) Auth(ctx context.Context, password string) *StatusCmd { cmd := NewStatusCmd(ctx, "auth", password) _ = c(ctx, cmd) return cmd } // AuthACL Perform an AUTH command, using the given user and pass. // Should be used to authenticate the current connection with one of the connections defined in the ACL list // when connecting to a Redis 6.0 instance, or greater, that is using the Redis ACL system. func (c statefulCmdable) AuthACL(ctx context.Context, username, password string) *StatusCmd { cmd := NewStatusCmd(ctx, "auth", username, password) _ = c(ctx, cmd) return cmd } func (c cmdable) Wait(ctx context.Context, numSlaves int, timeout time.Duration) *IntCmd { cmd := NewIntCmd(ctx, "wait", numSlaves, int(timeout/time.Millisecond)) cmd.setReadTimeout(timeout) _ = c(ctx, cmd) return cmd } func (c cmdable) WaitAOF(ctx context.Context, numLocal, numSlaves int, timeout time.Duration) *IntCmd { cmd := NewIntCmd(ctx, "waitAOF", numLocal, numSlaves, int(timeout/time.Millisecond)) cmd.setReadTimeout(timeout) _ = c(ctx, cmd) return cmd } func (c statefulCmdable) Select(ctx context.Context, index int) *StatusCmd { cmd := NewStatusCmd(ctx, "select", index) _ = c(ctx, cmd) return cmd } func (c statefulCmdable) SwapDB(ctx context.Context, index1, index2 int) *StatusCmd { cmd := NewStatusCmd(ctx, "swapdb", index1, index2) _ = c(ctx, cmd) return cmd } // ClientSetName assigns a name to the connection. func (c statefulCmdable) ClientSetName(ctx context.Context, name string) *BoolCmd { cmd := NewBoolCmd(ctx, "client", "setname", name) _ = c(ctx, cmd) return cmd } // ClientSetInfo sends a CLIENT SETINFO command with the provided info. func (c statefulCmdable) ClientSetInfo(ctx context.Context, info LibraryInfo) *StatusCmd { err := info.Validate() if err != nil { panic(err.Error()) } var cmd *StatusCmd if info.LibName != nil { libName := fmt.Sprintf("go-redis(%s,%s)", *info.LibName, internal.ReplaceSpaces(runtime.Version())) cmd = NewStatusCmd(ctx, "client", "setinfo", "LIB-NAME", libName) } else { cmd = NewStatusCmd(ctx, "client", "setinfo", "LIB-VER", *info.LibVer) } _ = c(ctx, cmd) return cmd } // Validate checks if only one field in the struct is non-nil. 
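// Illustrative sketch (comment only; the name and version values are
// hypothetical): LibraryInfo may carry exactly one of LibName or LibVer per
// call, so clients send CLIENT SETINFO one attribute at a time on a dedicated
// connection:
//
//	conn := rdb.Conn()
//	name, ver := "my-app", "1.2.3"
//	conn.ClientSetInfo(ctx, redis.LibraryInfo{LibName: &name}) // LIB-NAME
//	conn.ClientSetInfo(ctx, redis.LibraryInfo{LibVer: &ver})   // LIB-VER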
func (info LibraryInfo) Validate() error { if info.LibName != nil && info.LibVer != nil { return errors.New("both LibName and LibVer cannot be set at the same time") } if info.LibName == nil && info.LibVer == nil { return errors.New("at least one of LibName and LibVer should be set") } return nil } // Hello sets the resp protocol used. func (c statefulCmdable) Hello(ctx context.Context, ver int, username, password, clientName string, ) *MapStringInterfaceCmd { args := make([]interface{}, 0, 7) args = append(args, "hello", ver) if password != "" { if username != "" { args = append(args, "auth", username, password) } else { args = append(args, "auth", "default", password) } } if clientName != "" { args = append(args, "setname", clientName) } cmd := NewMapStringInterfaceCmd(ctx, args...) _ = c(ctx, cmd) return cmd } //------------------------------------------------------------------------------ func (c cmdable) Command(ctx context.Context) *CommandsInfoCmd { cmd := NewCommandsInfoCmd(ctx, "command") _ = c(ctx, cmd) return cmd } // FilterBy is used for the `CommandList` command parameter. type FilterBy struct { Module string ACLCat string Pattern string } func (c cmdable) CommandList(ctx context.Context, filter *FilterBy) *StringSliceCmd { args := make([]interface{}, 0, 5) args = append(args, "command", "list") if filter != nil { if filter.Module != "" { args = append(args, "filterby", "module", filter.Module) } else if filter.ACLCat != "" { args = append(args, "filterby", "aclcat", filter.ACLCat) } else if filter.Pattern != "" { args = append(args, "filterby", "pattern", filter.Pattern) } } cmd := NewStringSliceCmd(ctx, args...) _ = c(ctx, cmd) return cmd } func (c cmdable) CommandGetKeys(ctx context.Context, commands ...interface{}) *StringSliceCmd { args := make([]interface{}, 2+len(commands)) args[0] = "command" args[1] = "getkeys" copy(args[2:], commands) cmd := NewStringSliceCmd(ctx, args...) _ = c(ctx, cmd) return cmd } func (c cmdable) CommandGetKeysAndFlags(ctx context.Context, commands ...interface{}) *KeyFlagsCmd { args := make([]interface{}, 2+len(commands)) args[0] = "command" args[1] = "getkeysandflags" copy(args[2:], commands) cmd := NewKeyFlagsCmd(ctx, args...) _ = c(ctx, cmd) return cmd } // ClientGetName returns the name of the connection. func (c cmdable) ClientGetName(ctx context.Context) *StringCmd { cmd := NewStringCmd(ctx, "client", "getname") _ = c(ctx, cmd) return cmd } func (c cmdable) Echo(ctx context.Context, message interface{}) *StringCmd { cmd := NewStringCmd(ctx, "echo", message) _ = c(ctx, cmd) return cmd } func (c cmdable) Ping(ctx context.Context) *StatusCmd { cmd := NewStatusCmd(ctx, "ping") _ = c(ctx, cmd) return cmd } func (c cmdable) Do(ctx context.Context, args ...interface{}) *Cmd { cmd := NewCmd(ctx, args...) _ = c(ctx, cmd) return cmd } func (c cmdable) Quit(_ context.Context) *StatusCmd { panic("not implemented") } //------------------------------------------------------------------------------ func (c cmdable) BgRewriteAOF(ctx context.Context) *StatusCmd { cmd := NewStatusCmd(ctx, "bgrewriteaof") _ = c(ctx, cmd) return cmd } func (c cmdable) BgSave(ctx context.Context) *StatusCmd { cmd := NewStatusCmd(ctx, "bgsave") _ = c(ctx, cmd) return cmd } func (c cmdable) ClientKill(ctx context.Context, ipPort string) *StatusCmd { cmd := NewStatusCmd(ctx, "client", "kill", ipPort) _ = c(ctx, cmd) return cmd } // ClientKillByFilter is new style syntax, while the ClientKill is old // // CLIENT KILL
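// Illustrative sketch (comment only; the filter values are hypothetical):
// ClientKillByFilter passes filter/value pairs straight through, and FilterBy
// (defined earlier in this file) drives COMMAND LIST:
//
//	rdb.ClientKillByFilter(ctx, "ID", "12345")
//	// -> CLIENT KILL ID 12345
//
//	names, _ := rdb.CommandList(ctx, &redis.FilterBy{ACLCat: "read"}).Result()
//	// -> COMMAND LIST FILTERBY ACLCAT read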