diff --git a/vendor.conf b/vendor.conf new file mode 100644 index 0000000..95402ca --- /dev/null +++ b/vendor.conf @@ -0,0 +1,8 @@ +github.com/segmentio/backo-go 204274ad699c0983a70203a566887f17a717fef4 +github.com/segmentio/conf v1.1.0 +github.com/segmentio/go-snakecase v1.1.0 +github.com/segmentio/objconv v1.0.1 +github.com/xtgo/uuid a0b114877d4caeffbd7f87e3757c17fce570fea7 +gopkg.in/go-playground/mold.v2 v2.2.0 +gopkg.in/validator.v2 135c24b11c19e52befcae2ec3fca5d9b78c4e98e +gopkg.in/yaml.v2 v2.2.2 diff --git a/vendor/github.com/segmentio/backo-go/.gitmodules b/vendor/github.com/segmentio/backo-go/.gitmodules deleted file mode 100644 index 36de929..0000000 --- a/vendor/github.com/segmentio/backo-go/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "vendor/github.com/bmizerany/assert"] - path = vendor/github.com/bmizerany/assert - url = https://github.com/bmizerany/assert diff --git a/vendor/github.com/segmentio/conf/LICENSE b/vendor/github.com/segmentio/conf/LICENSE new file mode 100644 index 0000000..9ba2b78 --- /dev/null +++ b/vendor/github.com/segmentio/conf/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 Segment + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/segmentio/conf/README.md b/vendor/github.com/segmentio/conf/README.md new file mode 100644 index 0000000..3ca1124 --- /dev/null +++ b/vendor/github.com/segmentio/conf/README.md @@ -0,0 +1,231 @@ +# conf [![CircleCI](https://circleci.com/gh/segmentio/conf.svg?style=shield)](https://circleci.com/gh/segmentio/conf) [![Go Report Card](https://goreportcard.com/badge/github.com/segmentio/conf)](https://goreportcard.com/report/github.com/segmentio/conf) [![GoDoc](https://godoc.org/github.com/segmentio/conf?status.svg)](https://godoc.org/github.com/segmentio/conf) +Go package for loading program configuration from multiple sources. + +Motivations +----------- + +Loading program configurations is usually done by parsing the arguments passed +to the command line, and in this case the standard library offers a good support +with the `flag` package. +However, there are times where the standard is just too limiting, for example +when the program needs to load configuration from other sources (like a file, or +the environment variables). +The `conf` package was built to address these issues, here were the goals: + +- **Loading the configuration has to be type-safe**, there were other packages +available that were covering the same use-cases but they often required doing +type assertions on the configuration values which is always an opportunity to +get the program to panic. + +- **Keeping the API minimal**, while the `flag` package offered the type safety +we needed it is also very verbose to setup. With `conf`, only a single function +call is needed to setup and load the entire program configuration. 
+ +- **Supporting richer syntaxes**, because program configurations are often +generated dynamically, the `conf` package accepts YAML values as input to all +configuration values. It also has support for sub-commands on the command line, +which is a common approach used by CLI tools. + +- **Supporting multiple sources**, because passing values through the command +line is not always the best approach, programs may need to receive their +configuration from files, environment variables, secret stores, or other network +locations. + +Basic Usage +----------- + +A program using the `conf` package needs to declare a struct which is passed to +`conf.Load` to populate the fields with the configuration that was made +available at runtime through a configuration file, environment variables or the +program arguments. + +Each field of the structure may declare a `conf` tag which sets the name of the +property, and a `help` tag to provide a help message for the configuration. + +The `conf` package will automatically understand the structure of the program +configuration based on the struct it receives, as well as generating the program +usage and help messages if the `-h` or `-help` options are passed (or an error +is detected). + +The `conf.Load` function adds support for a `-config-file` option on the program +arguments which accepts the path to a file that the configuration may be loaded +from as well. + +Here's an example of how a program would typically use the package: +```go +package main + +import ( + "fmt" + + "github.com/segmentio/conf" +) + +func main() { + var config struct { + Message string `conf:"m" help:"A message to print."` + } + + // Load the configuration, either from a config file, the environment or the program arguments. + conf.Load(&config) + + fmt.Println(config.Message) +} +``` +``` +$ go run ./example.go -m 'Hello World!' +Hello World! 
+``` + +Environment Variables +--------------------- + +By default, `conf` will look for environment variables before loading command-line configuration flags with one important caveat: environment variables are prefixed with the program name. For example, given a program named "foobar": + +``` +func main() { + config := struct { + Name string `conf:"name"` + }{ + Name: "default", + } + conf.Load(&config) + fmt.Println("Hello", config.Name) +} +``` + +The following will be output: + +``` +$ ./foobar // "Hello default" +$ FOOBAR_NAME=world ./foobar // "Hello world" +$ FOOBAR_NAME=world ./foobar --name neighbor // "Hello neighbor" +$ MAIN_NAME=world go run main.go // "Hello world" +``` + +If you want to hard-code the prefix to guarantee immutability or just to customize it, you can supply a custom loader config: + +``` +loader := conf.Loader{ + Name: "my-service", + Args: os.Args[1:], + Sources: []conf.Source{ + conf.NewEnvSource("MY_SVC", os.Environ()...), + }, +} +conf.LoadWith(&config, loader) +``` + +Advanced Usage +-------------- + +While the `conf.Load` function is good enough for common use cases, programs +sometimes need to customize the default behavior. +A program may then use the `conf.LoadWith` function, which accepts a +`conf.Loader` as second argument to gain more control over how the configuration +is loaded. + +Here's the `conf.Loader` definition: +```go +package conf + +type Loader struct { + Name string // program name + Usage string // program usage + Args []string // list of arguments + Commands []Command // list of commands + Sources []Source // list of sources to load configuration from. +} +``` + +The `conf.Load` function is actually just a wrapper around `conf.LoadWith` that +passes a default loader. The default loader gets the program name from the first +program argument, supports no sub-commands, and has two custom sources setup to +potentially load its configuration from a configuration file or the environment +variables. 
+ +Here's an example showing how to configure a CLI tool that supports a couple of +sub-commands: +```go +package main + +import ( + "fmt" + + "github.com/segmentio/conf" +) + +func main() { + // If nil is passed instead of a configuration struct no arguments are + // parsed, only the command is extracted. + cmd, args := conf.LoadWith(nil, conf.Loader{ + Name: "example", + Args: os.Args[1:], + Commands: []conf.Command{ + {"print", "Print the message passed to -m"}, + {"version", "Show the program version"}, + }, + }) + + switch cmd { + case "print": + var config struct{ + Message string `conf:"m" help:"A message to print."` + } + + conf.LoadWith(&config, conf.Loader{ + Name: "example print", + Args: args, + }) + + fmt.Println(config.Message) + + case "version": + fmt.Println("1.2.3") + } +} +``` +``` +$ go run ./example.go version +1.2.3 +$ go run ./example.go print -m 'Hello World!' +Hello World! +``` + +Custom Sources +-------------- + +We mentioned the `conf.Loader` type supported setting custom sources that the +program configuration can be loaded from. Here's the `conf.Source` interface +definition: +```go +package conf + +type Source interface { + Load(dst Map) +} +``` + +The source has a single method which receives a `conf.Map` value which is an +intermediate representation of the configuration struct that was received by the +loader. +The package uses this type internally as well for loading configuration values +from the program arguments, it can be seen as a reflective representation of the +original value which exposes an API that is more convenient to use than having +a raw `reflect.Value`. + +One of the advantages of the `conf.Map` type is that it implements the +[objconv.ValueDecoder](https://godoc.org/github.com/segmentio/objconv#ValueDecoder) +interface and therefore can be used directly to load configurations from a +serialized format (like JSON for example). 
+ +Validation +---------- + +Last but not least, the `conf` package also supports automatic validation of the +fields in the configuration struct. This happens after the values were loaded +and is based on [gopkg.in/validator.v2](https://godoc.org/gopkg.in/validator.v2). + +This step could have been done outside the package however it is both convenient +and useful to have all configuration errors treated the same way (getting the +usage and help message shown when something is wrong). diff --git a/vendor/github.com/segmentio/conf/doc.go b/vendor/github.com/segmentio/conf/doc.go new file mode 100644 index 0000000..f4cc6c8 --- /dev/null +++ b/vendor/github.com/segmentio/conf/doc.go @@ -0,0 +1,25 @@ +// Package conf provides tools for easily loading program configurations +// from multiple sources such as the command line arguments, environment, or a +// configuration file. +// +// Most applications only need to use the Load function to get their settings +// loaded into an object. By default, Load will read from a configurable file +// defined by the -config-file command line argument, load values present in the +// environment, and finally load the program arguments. +// +// The object in which the configuration is loaded must be a struct, the names +// and types of its fields are introspected by the Load function to understand +// how to load the configuration. +// +// The name deduction from the struct field obeys the same rules as those +// implemented by the standard encoding/json package, which means the program +// can set the "conf" tag to override the default field names in the command +// line arguments and configuration file. +// +// A "help" tag may also be set on the fields of the configuration object to +// add documentation to the setting, which will be shown when the program is +// asked to print its help. 
+// +// When values are loaded from the environment the Load function looks for +// variables matching the struct fields names in snake-upper-case form. +package conf diff --git a/vendor/github.com/segmentio/conf/flag.go b/vendor/github.com/segmentio/conf/flag.go new file mode 100644 index 0000000..591a1e5 --- /dev/null +++ b/vendor/github.com/segmentio/conf/flag.go @@ -0,0 +1,24 @@ +package conf + +import ( + "flag" + "io/ioutil" + "strings" +) + +func newFlagSet(cfg Map, name string, sources ...Source) *flag.FlagSet { + set := flag.NewFlagSet(name, flag.ContinueOnError) + set.SetOutput(ioutil.Discard) + + cfg.Scan(func(path []string, item MapItem) { + set.Var(item.Value, strings.Join(append(path, item.Name), "."), item.Help) + }) + + for _, source := range sources { + if f, ok := source.(FlagSource); ok { + set.Var(f, f.Flag(), f.Help()) + } + } + + return set +} diff --git a/vendor/github.com/segmentio/conf/load.go b/vendor/github.com/segmentio/conf/load.go new file mode 100644 index 0000000..debbab7 --- /dev/null +++ b/vendor/github.com/segmentio/conf/load.go @@ -0,0 +1,300 @@ +package conf + +import ( + "bytes" + "context" + "errors" + "flag" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "sort" + "strings" + + "gopkg.in/go-playground/mold.v2/modifiers" + + validator "gopkg.in/validator.v2" + + // Load all default adapters of the objconv package. + _ "github.com/segmentio/objconv/adapters" + "github.com/segmentio/objconv/yaml" +) + +var ( + // Modifier is the default modification lib using the "mod" tag; it is + // exposed to allow registering of custom modifiers and aliases or to + // be set to a more central instance located in another repo. + Modifier = modifiers.New() +) + +// Load the program's configuration into cfg, and returns the list of leftover +// arguments. +// +// The cfg argument is expected to be a pointer to a struct type where exported +// fields or fields with a "conf" tag will be used to load the program +// configuration. 
+ +// The function panics if cfg is not a pointer to struct, or if it's a nil +// pointer. +// +// The configuration is loaded from the command line, environment and optional +// configuration file if the -config-file option is present in the program +// arguments. +// +// Values found in the program arguments take precedence over those found in +// the environment, which takes precedence over the configuration file. +// +// If an error is detected with the configuration the function prints the usage +// message to stderr and exits with status code 1. +func Load(cfg interface{}) (args []string) { + _, args = LoadWith(cfg, defaultLoader(os.Args, os.Environ())) + return +} + +// LoadWith behaves like Load but uses ld as a loader to parse the program +// configuration. +// +// The function panics if cfg is not a pointer to struct, or if it's a nil +// pointer and no commands were set. +func LoadWith(cfg interface{}, ld Loader) (cmd string, args []string) { + var err error + switch cmd, args, err = ld.Load(cfg); err { + case nil: + case flag.ErrHelp: + ld.PrintHelp(cfg) + os.Exit(0) + default: + ld.PrintHelp(cfg) + ld.PrintError(err) + os.Exit(1) + } + return +} + +// A Command represents a command supported by a configuration loader. +type Command struct { + Name string // name of the command + Help string // help message describing what the command does +} + +// A Loader exposes an API for customizing how a configuration is loaded and +// where it's loaded from. +type Loader struct { + Name string // program name + Usage string // program usage + Args []string // list of arguments + Commands []Command // list of commands + Sources []Source // list of sources to load configuration from. +} + +// Load uses the loader ld to load the program configuration into cfg, and +// returns the list of program arguments that were not used. +// +// The function returns flag.ErrHelp when the list of arguments contained -h, +// -help, or --help. 
+// +// The cfg argument is expected to be a pointer to a struct type where exported +// fields or fields with a "conf" tag will be used to load the program +// configuration. +// The function panics if cfg is not a pointer to struct, or if it's a nil +// pointer and no commands were set. +func (ld Loader) Load(cfg interface{}) (cmd string, args []string, err error) { + var v reflect.Value + + if cfg == nil { + v = reflect.ValueOf(&struct{}{}) + } else { + v = reflect.ValueOf(cfg) + } + + if v.Kind() != reflect.Ptr { + panic(fmt.Sprintf("cannot load configuration into non-pointer type: %T", cfg)) + } + + if v.IsNil() { + panic(fmt.Sprintf("cannot load configuration into nil pointer of type: %T", cfg)) + } + + if v = v.Elem(); v.Kind() != reflect.Struct { + panic(fmt.Sprintf("cannot load configuration into non-struct pointer: %T", cfg)) + } + + if len(ld.Commands) != 0 { + if len(ld.Args) == 0 { + err = errors.New("missing command") + return + } + + found := false + for _, c := range ld.Commands { + if c.Name == ld.Args[0] { + found, cmd, ld.Args = true, ld.Args[0], ld.Args[1:] + break + } + } + + if !found { + err = errors.New("unknown command: " + ld.Args[0]) + return + } + + if cfg == nil { + args = ld.Args + return + } + } + + if args, err = ld.load(v); err != nil { + return + } + + if err = Modifier.Struct(context.Background(), cfg); err != nil { + return + } + + if err = validator.Validate(v.Interface()); err != nil { + err = makeValidationError(err, v.Type()) + } + + return +} + +func (ld Loader) load(cfg reflect.Value) (args []string, err error) { + node := makeNodeStruct(cfg, cfg.Type()) + set := newFlagSet(node, ld.Name, ld.Sources...) + + // Parse the arguments a first time so the sources that implement the + // FlagSource interface get their values loaded. + if err = set.Parse(ld.Args); err != nil { + return + } + + // Load the configuration from the sources that have been configured on the + // loader. 
+ // Order is important here because the values will get overwritten by each + // source that loads the configuration. + for _, source := range ld.Sources { + if err = source.Load(node); err != nil { + return + } + } + + // Parse the arguments a second time to overwrite values loaded by sources + // which were also passed to the program arguments. + if err = set.Parse(ld.Args); err != nil { + return + } + + args = set.Args() + return +} + +func defaultLoader(args []string, env []string) Loader { + var name = filepath.Base(args[0]) + return Loader{ + Name: name, + Args: args[1:], + Sources: []Source{ + NewFileSource("config-file", makeEnvVars(env), ioutil.ReadFile, yaml.Unmarshal), + NewEnvSource(name, env...), + }, + } +} + +func makeEnvVars(env []string) (vars map[string]string) { + vars = make(map[string]string) + + for _, e := range env { + var k string + var v string + + if off := strings.IndexByte(e, '='); off >= 0 { + k, v = e[:off], e[off+1:] + } else { + k = e + } + + vars[k] = v + } + + return vars +} + +func makeValidationError(err error, typ reflect.Type) error { + if errmap, ok := err.(validator.ErrorMap); ok { + errkeys := make([]string, 0, len(errmap)) + errlist := make(errorList, 0, len(errmap)) + + for errkey := range errmap { + errkeys = append(errkeys, errkey) + } + + sort.Strings(errkeys) + + for _, errkey := range errkeys { + path := fieldPath(typ, errkey) + + if len(errmap[errkey]) == 1 { + errlist = append(errlist, fmt.Errorf("invalid value passed to %s: %s", path, errmap[errkey][0])) + } else { + buf := &bytes.Buffer{} + fmt.Fprintf(buf, "invalid value passed to %s: ", path) + + for i, errval := range errmap[errkey] { + if i != 0 { + buf.WriteString("; ") + } + buf.WriteString(errval.Error()) + } + + errlist = append(errlist, errors.New(buf.String())) + } + } + + err = errlist + } + return err +} + +type errorList []error + +func (err errorList) Error() string { + if len(err) > 0 { + return err[0].Error() + } + return "" +} + +func 
fieldPath(typ reflect.Type, path string) string { + var name string + + if sep := strings.IndexByte(path, '.'); sep >= 0 { + name, path = path[:sep], path[sep+1:] + } else { + name, path = path, "" + } + + if field, ok := typ.FieldByName(name); ok { + name = field.Tag.Get("conf") + if len(name) == 0 { + name = field.Name + } else if name == "_" { + name = "" + } + + if len(path) != 0 { + path = fieldPath(field.Type, path) + } + } + + if len(path) != 0 { + if len(name) == 0 { + name = path + } else { + name += "." + path + } + } + + return name +} diff --git a/vendor/github.com/segmentio/conf/node.go b/vendor/github.com/segmentio/conf/node.go new file mode 100644 index 0000000..192420f --- /dev/null +++ b/vendor/github.com/segmentio/conf/node.go @@ -0,0 +1,651 @@ +package conf + +import ( + "bytes" + "encoding" + "flag" + "fmt" + "reflect" + "sort" + "time" + + "github.com/segmentio/objconv" + "github.com/segmentio/objconv/json" + "github.com/segmentio/objconv/yaml" +) + +// NodeKind is an enumeration which describes the different types of nodes that +// are supported in a configuration. +type NodeKind int + +const ( + // ScalarNode represents configuration nodes of type Scalar. + ScalarNode NodeKind = iota + + // ArrayNode represents configuration nodes of type Array. + ArrayNode + + // MapNode represents configuration nodes of type Map. + MapNode +) + +// The Node interface defines the common interface supported by the different +// types of configuration nodes supported by the conf package. +type Node interface { + flag.Value + objconv.ValueEncoder + objconv.ValueDecoder + + // Kind returns the NodeKind of the configuration node. + Kind() NodeKind + + // Value returns the underlying value wrapped by the configuration node. + Value() interface{} +} + +// EqualNode compares n1 and n2, returning true if they are deeply equal. 
+func EqualNode(n1 Node, n2 Node) bool { + if n1 == nil || n2 == nil { + return n1 == n2 + } + + k1 := n1.Kind() + k2 := n2.Kind() + + if k1 != k2 { + return false + } + + switch k1 { + case ArrayNode: + return equalNodeArray(n1.(Array), n2.(Array)) + case MapNode: + return equalNodeMap(n1.(Map), n2.(Map)) + default: + return equalNodeScalar(n1.(Scalar), n2.(Scalar)) + } +} + +func equalNodeArray(a1 Array, a2 Array) bool { + n1 := a1.Len() + n2 := a2.Len() + + if n1 != n2 { + return false + } + + for i := 0; i != n1; i++ { + if !EqualNode(a1.Item(i), a2.Item(i)) { + return false + } + } + + return true +} + +func equalNodeMap(m1 Map, m2 Map) bool { + n1 := m1.Len() + n2 := m2.Len() + + if n1 != n2 { + return false + } + + for _, item := range m1.Items() { + if !EqualNode(item.Value, m2.Item(item.Name)) { + return false + } + } + + return true +} + +func equalNodeScalar(s1 Scalar, s2 Scalar) bool { + v1 := s1.value.IsValid() + v2 := s2.value.IsValid() + + if !v1 || !v2 { + return v1 == v2 + } + + t1 := s1.value.Type() + t2 := s2.value.Type() + + if t1 != t2 { + return false + } + + switch t1 { + case timeTimeType: + return s1.Value().(time.Time).Equal(s2.Value().(time.Time)) + } + + return reflect.DeepEqual(s1.Value(), s2.Value()) +} + +// MakeNode builds a Node from the value v. +// +// The function panics if v contains unrepresentable values. 
+func MakeNode(v interface{}) Node { + return makeNode(reflect.ValueOf(v)) +} + +func makeNode(v reflect.Value) Node { + if !v.IsValid() { + return makeNodeScalar(v) + } + + t := v.Type() + + switch t { + case timeTimeType, timeDurationType: + return makeNodeScalar(v) + } + + if _, ok := objconv.AdapterOf(t); ok { + return makeNodeScalar(v) + } + + switch { + case + t.Implements(objconvValueDecoderInterface), + t.Implements(textUnmarshalerInterface): + return makeNodeScalar(v) + } + + switch t.Kind() { + case reflect.Array, reflect.Chan, reflect.Func, reflect.UnsafePointer, reflect.Interface: + panic("unsupported type found in configuration: " + t.String()) + + case reflect.Struct: + return makeNodeStruct(v, t) + + case reflect.Map: + return makeNodeMap(v, t) + + case reflect.Slice: + return makeNodeSlice(v, t) + + case reflect.Ptr: + return makeNodePtr(v, t) + + default: + return makeNodeScalar(v) + } +} + +func makeNodeStruct(v reflect.Value, t reflect.Type) (m Map) { + m.value = v + m.items = newMapItems() + + populateNodeStruct(t, t.Name(), v, t, m) + + // if using the "_" notation to embed structs, it's possible that names are no longer unique. + props := make(map[string]struct{}) + for _, item := range m.Items() { + if _, ok := props[item.Name]; ok { + panic("duplicate name '" + item.Name + "' found after collapsing embedded structs in configuration: " + t.String()) + } + props[item.Name] = struct{}{} + } + + return +} + +// populateNodeStruct is the mutually recursive helper of makeNodeStruct to create the node struct with potentially +// embedded types. It will populate m with the struct fields from v. The original type and path of the current field +// are passed in order to create decent panic strings if an invalid configuration is detected. 
+func populateNodeStruct(originalT reflect.Type, path string, v reflect.Value, t reflect.Type, m Map) { + + for i, n := 0, v.NumField(); i != n; i++ { + fv := v.Field(i) + ft := t.Field(i) + + if !isExported(ft) { + continue + } + + name, help := ft.Tag.Get("conf"), ft.Tag.Get("help") + switch name { + case "-": + continue + case "_": + path = path + "." + ft.Name + if ft.Type.Kind() != reflect.Struct || !ft.Anonymous { + panic("found \"_\" on invalid type at path " + path + " in configuration: " + originalT.Name()) + } + populateNodeStruct(originalT, path, fv, ft.Type, m) + continue + case "": + name = ft.Name + } + + m.items.push(MapItem{ + Name: name, + Help: help, + Value: makeNode(fv), + }) + } +} + +func makeNodeMap(v reflect.Value, t reflect.Type) (m Map) { + if v.IsNil() && v.CanSet() { + v.Set(reflect.MakeMap(v.Type())) + } + + m.value = v + m.items = newMapItems() + + for _, key := range v.MapKeys() { + m.items.push(MapItem{ + Name: key.String(), // only string keys are supported for now + Value: makeNode(v.MapIndex(key)), + }) + } + + sort.Sort(m.items) + return +} + +func makeNodeSlice(v reflect.Value, t reflect.Type) (a Array) { + n := v.Len() + a.value = v + a.items = newArrayItems() + + for i := 0; i != n; i++ { + a.items.push(makeNode(v.Index(i))) + } + + return +} + +func makeNodePtr(v reflect.Value, t reflect.Type) Node { + if v.IsNil() { + p := reflect.New(t.Elem()) + + if v.CanSet() { + v.Set(p) + } + + v = p + } + return makeNode(v.Elem()) +} + +func makeNodeScalar(value reflect.Value) (s Scalar) { + s.value = value + return +} + +func isExported(f reflect.StructField) bool { + return len(f.PkgPath) == 0 +} + +// A Scalar is a node type that wraps a basic value. 
+type Scalar struct { + value reflect.Value +} + +func (s Scalar) Kind() NodeKind { + return ScalarNode +} + +func (s Scalar) Value() interface{} { + if !s.value.IsValid() { + return nil + } + return s.value.Interface() +} + +func (s Scalar) String() string { + b, _ := yaml.Marshal(s) + return string(bytes.TrimSpace(b)) +} + +func (s Scalar) Set(str string) (err error) { + defer func() { + if x := recover(); x != nil { + err = fmt.Errorf("%s", x) + } + }() + ptr := s.value.Addr().Interface() + + if err = yaml.Unmarshal([]byte(str), ptr); err != nil { + if b, _ := json.Marshal(str); b != nil { + if json.Unmarshal(b, ptr) == nil { + err = nil + } + } + } + + return +} + +func (s Scalar) EncodeValue(e objconv.Encoder) error { + return e.Encode(s.Value()) +} + +func (s Scalar) DecodeValue(d objconv.Decoder) error { + return d.Decode(s.value.Addr().Interface()) +} + +func (s Scalar) IsBoolFlag() bool { + return s.value.IsValid() && s.value.Kind() == reflect.Bool +} + +// Array is a node type that wraps a slice value. 
+type Array struct { + value reflect.Value + items *arrayItems +} + +func (a Array) Kind() NodeKind { + return ArrayNode +} + +func (a Array) Value() interface{} { + if !a.value.IsValid() { + return nil + } + return a.value.Interface() +} + +func (a Array) Items() []Node { + if a.items == nil { + return nil + } + return a.items.items() +} + +func (a Array) Item(i int) Node { + return a.items.index(i) +} + +func (a Array) Len() int { + if a.items == nil { + return 0 + } + return a.items.len() +} + +func (a Array) String() string { + if a.Len() == 0 { + return "[ ]" + } + b := &bytes.Buffer{} + b.WriteByte('[') + + for i, item := range a.Items() { + if i != 0 { + b.WriteString(", ") + } + b.WriteString(item.String()) + } + + b.WriteByte(']') + return b.String() +} + +func (a Array) Set(s string) error { + return yaml.Unmarshal([]byte(s), a) +} + +func (a Array) EncodeValue(e objconv.Encoder) (err error) { + i := 0 + return e.EncodeArray(a.Len(), func(e objconv.Encoder) (err error) { + if err = a.Item(i).EncodeValue(e); err != nil { + return + } + i++ + return + }) +} + +func (a Array) DecodeValue(d objconv.Decoder) (err error) { + a.pop(a.Len()) + return d.DecodeArray(func(d objconv.Decoder) (err error) { + if err = a.push().DecodeValue(d); err != nil { + a.pop(1) + } + return + }) +} + +func (a Array) push() Node { + i := a.Len() + a.value.Set(reflect.Append(a.value, reflect.Zero(a.value.Type().Elem()))) + a.items.push(makeNode(a.value.Index(i))) + return a.items.index(i) +} + +func (a Array) pop(n int) { + if n != 0 { + a.value.Set(a.value.Slice(0, a.Len()-n)) + a.items.pop(n) + } +} + +// Map is a map type that wraps a map or struct value. +type Map struct { + value reflect.Value + items *mapItems +} + +// MapItem is the type of elements stored in a Map. 
+type MapItem struct { + Name string + Help string + Value Node +} + +func (m Map) Kind() NodeKind { + return MapNode +} + +func (m Map) Value() interface{} { + if !m.value.IsValid() { + return nil + } + return m.value.Interface() +} + +func (m Map) Items() []MapItem { + if m.items == nil { + return nil + } + return m.items.items() +} + +func (m Map) Item(name string) Node { + if m.items == nil { + return nil + } + return m.items.get(name) +} + +func (m Map) Len() int { + if m.items == nil { + return 0 + } + return m.items.len() +} + +func (m Map) String() string { + if m.Len() == 0 { + return "{ }" + } + + b := &bytes.Buffer{} + b.WriteString("{ ") + + for i, item := range m.Items() { + if i != 0 { + b.WriteString(", ") + } + fmt.Fprintf(b, "%s: %s", item.Name, item.Value) + + if len(item.Help) != 0 { + fmt.Fprintf(b, " (%s)", item.Help) + } + } + + b.WriteString(" }") + return b.String() +} + +func (m Map) Set(s string) error { + return yaml.Unmarshal([]byte(s), m) +} + +func (m Map) EncodeValue(e objconv.Encoder) error { + i := 0 + return e.EncodeMap(m.Len(), func(ke objconv.Encoder, ve objconv.Encoder) (err error) { + item := &m.items.nodes[i] + if err = ke.Encode(item.Name); err != nil { + return + } + if err = item.Value.EncodeValue(ve); err != nil { + return + } + i++ + return + }) +} + +func (m Map) DecodeValue(d objconv.Decoder) error { + return d.DecodeMap(func(kd objconv.Decoder, vd objconv.Decoder) (err error) { + var key string + + if err = kd.Decode(&key); err != nil { + return + } + + if m.value.Kind() == reflect.Struct { + if item := m.Item(key); item != nil { + return item.DecodeValue(vd) + } + return vd.Decode(nil) // discard + } + + name := reflect.ValueOf(key) + node := makeNode(reflect.New(m.value.Type().Elem())) + + if err = node.DecodeValue(vd); err != nil { + return + } + + m.value.SetMapIndex(name, reflect.ValueOf(node.Value())) + m.items.put(MapItem{ + Name: key, + Value: makeNode(m.value.MapIndex(name)), + }) + return + }) +} + +func (m 
Map) Scan(do func([]string, MapItem)) { + m.scan(make([]string, 0, 10), do) +} + +func (m Map) scan(path []string, do func([]string, MapItem)) { + for _, item := range m.Items() { + do(path, item) + + switch v := item.Value.(type) { + case Map: + v.scan(append(path, item.Name), do) + } + } +} + +type arrayItems struct { + nodes []Node +} + +func newArrayItems(nodes ...Node) *arrayItems { + return &arrayItems{nodes} +} + +func (a *arrayItems) push(n Node) { + a.nodes = append(a.nodes, n) +} + +func (a *arrayItems) pop(n int) { + a.nodes = a.nodes[:len(a.nodes)-n] +} + +func (a *arrayItems) len() int { + return len(a.nodes) +} + +func (a *arrayItems) index(i int) Node { + return a.nodes[i] +} + +func (a *arrayItems) items() []Node { + return a.nodes +} + +type mapItems struct { + nodes []MapItem +} + +func newMapItems(nodes ...MapItem) *mapItems { + return &mapItems{nodes} +} + +func (m *mapItems) get(name string) Node { + if i := m.index(name); i >= 0 { + return m.nodes[i].Value + } + return nil +} + +func (m *mapItems) index(name string) int { + for i, node := range m.nodes { + if node.Name == name { + return i + } + } + return -1 +} + +func (m *mapItems) len() int { + return len(m.nodes) +} + +func (m *mapItems) items() []MapItem { + return m.nodes +} + +func (m *mapItems) push(item MapItem) { + m.nodes = append(m.nodes, item) +} + +func (m *mapItems) put(item MapItem) { + if i := m.index(item.Name); i >= 0 { + m.nodes[i] = item + } else { + m.push(item) + } +} + +func (m *mapItems) Less(i int, j int) bool { + return m.nodes[i].Name < m.nodes[j].Name +} + +func (m *mapItems) Swap(i int, j int) { + m.nodes[i], m.nodes[j] = m.nodes[j], m.nodes[i] +} + +func (m *mapItems) Len() int { + return len(m.nodes) +} + +var ( + timeTimeType = reflect.TypeOf(time.Time{}) + timeDurationType = reflect.TypeOf(time.Duration(0)) + + objconvValueDecoderInterface = reflect.TypeOf((*objconv.ValueDecoder)(nil)).Elem() + textUnmarshalerInterface = 
reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +) diff --git a/vendor/github.com/segmentio/conf/pprof.go b/vendor/github.com/segmentio/conf/pprof.go new file mode 100644 index 0000000..5eeacfb --- /dev/null +++ b/vendor/github.com/segmentio/conf/pprof.go @@ -0,0 +1,34 @@ +package conf + +import "runtime" + +// PPROF is a configuration struct which can be used to configure the runtime +// profilers of programs. +// +// config := struct{ +// PPROF `conf:"pprof"` +// }{ +// PPROF: conf.DefaultPPROF(), +// } +// conf.Load(&config) +// conf.SetPPROF(config.PPROF) +// +type PPROF struct { + BlockProfileRate int `conf:"block-profile-rate" help:"Sets the block profile rate to enable runtime profiling of blocking operations, zero disables block profiling." validate:"min=0"` + MutexProfileFraction int `conf:"mutex-profile-fraction" help:"Sets the mutex profile fraction to enable runtime profiling of lock contention, zero disables mutex profiling." validate:"min=0"` +} + +// DefaultPPROF returns the default value of a PPROF struct. Note that the +// zero-value is valid, DefaultPPROF differs because it captures the current +// configuration of the program's runtime. +func DefaultPPROF() PPROF { + return PPROF{ + MutexProfileFraction: runtime.SetMutexProfileFraction(-1), + } +} + +// SetPPROF configures the runtime profilers based on the given PPROF config. +func SetPPROF(config PPROF) { + runtime.SetBlockProfileRate(config.BlockProfileRate) + runtime.SetMutexProfileFraction(config.MutexProfileFraction) +} diff --git a/vendor/github.com/segmentio/conf/print.go b/vendor/github.com/segmentio/conf/print.go new file mode 100644 index 0000000..9ec9ebc --- /dev/null +++ b/vendor/github.com/segmentio/conf/print.go @@ -0,0 +1,300 @@ +package conf + +import ( + "bufio" + "flag" + "fmt" + "io" + "os" + "reflect" + "strings" + + "github.com/segmentio/objconv" +) + +// PrintError outputs the error message for err to stderr. 
+func (ld Loader) PrintError(err error) { + w := bufio.NewWriter(os.Stderr) + ld.fprintError(w, err, stderr()) + w.Flush() +} + +// FprintError outputs the error message for err to w. +func (ld Loader) FprintError(w io.Writer, err error) { + ld.fprintError(w, err, monochrome()) +} + +// PrintHelp outputs the help message for cfg to stderr. +func (ld Loader) PrintHelp(cfg interface{}) { + w := bufio.NewWriter(os.Stderr) + ld.fprintHelp(w, cfg, stderr()) + w.Flush() +} + +// FprintHelp outputs the help message for cfg to w. +func (ld Loader) FprintHelp(w io.Writer, cfg interface{}) { + ld.fprintHelp(w, cfg, monochrome()) +} + +func (ld Loader) fprintError(w io.Writer, err error, col colors) { + var errors errorList + + if e, ok := err.(errorList); ok { + errors = e + } else { + errors = errorList{err} + } + + fmt.Fprintf(w, "%s\n", col.titles("Error:")) + + for _, e := range errors { + fmt.Fprintf(w, " %s\n", col.errors(e.Error())) + } + + fmt.Fprintln(w) +} + +func (ld Loader) fprintHelp(w io.Writer, cfg interface{}, col colors) { + var m Map + + if cfg != nil { + v := reflect.ValueOf(cfg) + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + m = makeNodeStruct(v, v.Type()) + } + + fmt.Fprintf(w, "%s\n", col.titles("Usage:")) + switch { + case len(ld.Usage) != 0: + fmt.Fprintf(w, " %s %s\n\n", ld.Name, ld.Usage) + case len(ld.Commands) != 0: + fmt.Fprintf(w, " %s [command] [options...]\n\n", ld.Name) + default: + fmt.Fprintf(w, " %s [-h] [-help] [options...]\n\n", ld.Name) + } + + if len(ld.Commands) != 0 { + fmt.Fprintf(w, "%s\n", col.titles("Commands:")) + width := 0 + + for _, c := range ld.Commands { + if n := len(col.cmds(c.Name)); n > width { + width = n + } + } + + cmdfmt := fmt.Sprintf(" %%-%ds %%s\n", width) + + for _, c := range ld.Commands { + fmt.Fprintf(w, cmdfmt, col.cmds(c.Name), c.Help) + } + + fmt.Fprintln(w) + } + + set := newFlagSet(m, ld.Name, ld.Sources...) 
+ if m.Len() != 0 { + fmt.Fprintf(w, "%s\n", col.titles("Options:")) + } + + // Outputs the flags following the same format than the standard flag + // package. The main difference is in the type names which are set to + // values returned by prettyType. + set.VisitAll(func(f *flag.Flag) { + var t string + var h []string + var empty bool + var boolean bool + var object bool + var list bool + + switch v := f.Value.(type) { + case Node: + x := reflect.ValueOf(v.Value()) + t = prettyType(x.Type()) + empty = isEmptyValue(x) + + switch v.(type) { + case Map: + object = true + case Array: + list = true + default: + boolean = isBoolFlag(x) + } + + case FlagSource: + t = "source" + default: + t = "value" + } + + fmt.Fprintf(w, " %s", col.keys("-"+f.Name)) + + switch { + case !boolean: + fmt.Fprintf(w, " %s\n", col.types(t)) + case len(f.Name) >= 4: // put help message inline for boolean flags + fmt.Fprint(w, "\n") + } + + if s := f.Usage; len(s) != 0 { + h = append(h, s) + } + + if s := f.DefValue; len(s) != 0 && !empty && !(boolean || object || list) { + h = append(h, col.defvals("(default "+s+")")) + } + + if len(h) != 0 { + if !boolean || len(f.Name) >= 4 { + fmt.Fprint(w, " ") + } + fmt.Fprintf(w, "\t%s\n", strings.Join(h, " ")) + } + + fmt.Fprint(w, "\n") + }) +} + +func prettyType(t reflect.Type) string { + if t == nil { + return "unknown" + } + + if _, ok := objconv.AdapterOf(t); ok { + return "value" + } + + switch { + case t.Implements(objconvValueDecoderInterface): + return "value" + case t.Implements(textUnmarshalerInterface): + return "string" + } + + switch t { + case timeDurationType: + return "duration" + case timeTimeType: + return "time" + } + + switch t.Kind() { + case reflect.Struct, reflect.Map: + return "object" + case reflect.Slice, reflect.Array: + if t.Elem().Kind() == reflect.Uint8 { + return "base64" + } + return "list" + case reflect.Ptr: + return prettyType(t.Elem()) + default: + s := strings.ToLower(t.String()) + if i := 
strings.LastIndexByte(s, '.'); i >= 0 { + s = s[i+1:] + } + return s + } +} + +type colors struct { + titles func(string) string + cmds func(string) string + keys func(string) string + types func(string) string + defvals func(string) string + errors func(string) string +} + +func stderr() colors { + if isTerminal(2) { + return colorized() + } + return monochrome() +} + +func colorized() colors { + return colors{ + titles: bold, + cmds: magenta, + keys: blue, + types: green, + defvals: grey, + errors: red, + } +} + +func monochrome() colors { + return colors{ + titles: normal, + cmds: normal, + keys: normal, + types: normal, + defvals: normal, + errors: normal, + } +} + +func bold(s string) string { + return "\033[1m" + s + "\033[0m" +} + +func blue(s string) string { + return "\033[1;34m" + s + "\033[0m" +} + +func green(s string) string { + return "\033[1;32m" + s + "\033[0m" +} + +func red(s string) string { + return "\033[1;31m" + s + "\033[0m" +} + +func magenta(s string) string { + return "\033[1;35m" + s + "\033[0m" +} + +func grey(s string) string { + return "\033[1;30m" + s + "\033[0m" +} + +func normal(s string) string { + return s +} + +func isEmptyValue(v reflect.Value) bool { + if !v.IsValid() { + return true + } + + switch v.Kind() { + case reflect.Slice, reflect.Map: + return v.Len() == 0 + + case reflect.Struct: + return v.NumField() == 0 + } + + return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) +} + +func isBoolFlag(v reflect.Value) bool { + type iface interface { + IsBoolFlag() bool + } + + if !v.IsValid() { + return false + } + + if x, ok := v.Interface().(iface); ok { + return x.IsBoolFlag() + } + + return v.Kind() == reflect.Bool +} diff --git a/vendor/github.com/segmentio/conf/snakecase.go b/vendor/github.com/segmentio/conf/snakecase.go new file mode 100644 index 0000000..5769cc8 --- /dev/null +++ b/vendor/github.com/segmentio/conf/snakecase.go @@ -0,0 +1,84 @@ +package conf + +import "strings" + +func snakecaseLower(s 
string) string { + return strings.ToLower(snakecase(s)) +} + +func snakecaseUpper(s string) string { + return strings.ToUpper(snakecase(s)) +} + +func snakecase(s string) string { + b := make([]byte, 0, 64) + i := len(s) - 1 + + // search sequences, starting from the end of the string + for i >= 0 { + switch { + case isLower(s[i]): // sequence of lowercase, maybe starting with an uppercase + for i >= 0 && !isSeparator(s[i]) && !isUpper(s[i]) { + b = append(b, s[i]) + i-- + } + + if i >= 0 { + b = append(b, snakebyte(s[i])) + i-- + if isSeparator(s[i+1]) { // avoid double underscore if we have "_word" + continue + } + } + + if i >= 0 && !isSeparator(s[i]) { // avoid double underscores if we have "_Word" + b = append(b, '_') + } + + case isUpper(s[i]): // sequence of uppercase + for i >= 0 && !isSeparator(s[i]) && !isLower(s[i]) { + b = append(b, s[i]) + i-- + } + + if i >= 0 { + if isSeparator(s[i]) { + i-- + } + b = append(b, '_') + } + + default: // not a letter, it'll be part of the next sequence + b = append(b, snakebyte(s[i])) + i-- + } + } + + // reverse + for i, j := 0, len(b)-1; i < j; { + b[i], b[j] = b[j], b[i] + i++ + j-- + } + + return string(b) +} + +func snakebyte(b byte) byte { + if isSeparator(b) { + return '_' + } + return b +} + +func isSeparator(c byte) bool { + return c == '_' || c == '-' +} + +func isUpper(c byte) bool { + return c >= 'A' && c <= 'Z' +} + +func isLower(c byte) bool { + return c >= 'a' && c <= 'z' +} diff --git a/vendor/github.com/segmentio/conf/source.go b/vendor/github.com/segmentio/conf/source.go new file mode 100644 index 0000000..91e05cd --- /dev/null +++ b/vendor/github.com/segmentio/conf/source.go @@ -0,0 +1,159 @@ +package conf + +import ( + "bytes" + "flag" + "strings" + "text/template" + + "github.com/segmentio/objconv/json" +) + +// Source is the interface that allow new types to be plugged into a loader to +// make it possible to load configuration from new places. 
+// +// When the configuration is loaded the Load method of each source that was set +// on a loader is called with an Node representating the configuration struct. +// The typical implementation of a source is to load the serialized version of +// the configuration and use an objconv decoder to build the node. +type Source interface { + Load(dst Map) error +} + +// FlagSource is a special case of a source that receives a configuration value +// from the arguments of a loader. It makes it possible to provide runtime +// configuration to the source from the command line arguments of a program. +type FlagSource interface { + Source + + // Flag is the name of the flag that sets the source's configuration value. + Flag() string + + // Help is called to get the help message to display for the source's flag. + Help() string + + // flag.Value must be implemented by a FlagSource to receive their value + // when the loader's arguments are parsed. + flag.Value +} + +// SourceFunc makes it possible to use basic function types as configuration +// sources. +type SourceFunc func(dst Map) error + +// Load calls f. +func (f SourceFunc) Load(dst Map) error { + return f(dst) +} + +// NewEnvSource creates a new source which loads values from the environment +// variables given in env. +// +// A prefix may be set to namespace the environment variables that the source +// will be looking at. +func NewEnvSource(prefix string, env ...string) Source { + vars := makeEnvVars(env) + base := make([]string, 0, 10) + + if prefix != "" { + base = append(base, prefix) + } + + return SourceFunc(func(dst Map) (err error) { + dst.Scan(func(path []string, item MapItem) { + path = append(base, path...) 
+ path = append(path, item.Name) + + k := snakecaseUpper(strings.Join(path, "_")) + + if v, ok := vars[k]; ok { + if e := item.Value.Set(v); e != nil { + err = e + } + } + }) + return + }) +} + +// NewFileSource creates a new source which loads a configuration from a file +// identified by a path (or URL). +// +// The returned source satisfies the FlagSource interface because it loads the +// file location from the given flag. +// +// The vars argument may be set to render the configuration file if it's a +// template. +// +// The readFile function loads the file content in-memory from a file location +// given as argument, usually this is ioutil.ReadFile. +// +// The unmarshal function decodes the content of the configuration file into a +// configuration object. +func NewFileSource(flag string, vars interface{}, readFile func(string) ([]byte, error), unmarshal func([]byte, interface{}) error) FlagSource { + return &fileSource{ + flag: flag, + vars: vars, + readFile: readFile, + unmarshal: unmarshal, + } +} + +type fileSource struct { + flag string + path string + vars interface{} + readFile func(string) ([]byte, error) + unmarshal func([]byte, interface{}) error +} + +func (f *fileSource) Load(dst Map) (err error) { + var b []byte + + if len(f.path) == 0 { + return + } + + if b, err = f.readFile(f.path); err != nil { + return + } + + tpl := template.New(f.flag) + buf := &bytes.Buffer{} + buf.Grow(len(b)) + + tpl = tpl.Funcs(template.FuncMap{ + "json": func(v interface{}) (string, error) { + b, err := json.Marshal(v) + return string(b), err + }, + }) + + if _, err = tpl.Parse(string(b)); err != nil { + return + } + + if err = tpl.Execute(buf, f.vars); err != nil { + return + } + + err = f.unmarshal(buf.Bytes(), dst) + return +} + +func (f *fileSource) Flag() string { + return f.flag +} + +func (f *fileSource) Help() string { + return "Location to load the configuration file from." 
+} + +func (f *fileSource) Set(s string) error { + f.path = s + return nil +} + +func (f *fileSource) String() string { + return f.path +} diff --git a/vendor/github.com/segmentio/conf/terminal.go b/vendor/github.com/segmentio/conf/terminal.go new file mode 100644 index 0000000..9e471d6 --- /dev/null +++ b/vendor/github.com/segmentio/conf/terminal.go @@ -0,0 +1,19 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd + +package conf + +import ( + "syscall" + "unsafe" +) + +// isTerminal returns true if the given file descriptor is a terminal. +func isTerminal(fd int) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/github.com/segmentio/conf/terminal_bsd.go b/vendor/github.com/segmentio/conf/terminal_bsd.go new file mode 100644 index 0000000..b413d62 --- /dev/null +++ b/vendor/github.com/segmentio/conf/terminal_bsd.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package conf + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA +const ioctlWriteTermios = syscall.TIOCSETA diff --git a/vendor/github.com/segmentio/conf/terminal_linux.go b/vendor/github.com/segmentio/conf/terminal_linux.go new file mode 100644 index 0000000..3b14cd8 --- /dev/null +++ b/vendor/github.com/segmentio/conf/terminal_linux.go @@ -0,0 +1,11 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package conf + +// These constants are declared here, rather than importing +// them from the syscall package as some syscall packages, even +// on linux, for example gccgo, do not declare them. +const ioctlReadTermios = 0x5401 // syscall.TCGETS +const ioctlWriteTermios = 0x5402 // syscall.TCSETS diff --git a/vendor/github.com/segmentio/conf/terminal_plan9.go b/vendor/github.com/segmentio/conf/terminal_plan9.go new file mode 100644 index 0000000..95a3110 --- /dev/null +++ b/vendor/github.com/segmentio/conf/terminal_plan9.go @@ -0,0 +1,10 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package conf + +// isTerminal returns true if the given file descriptor is a terminal. +func isTerminal(fd int) bool { + return false +} diff --git a/vendor/github.com/segmentio/conf/terminal_windows.go b/vendor/github.com/segmentio/conf/terminal_windows.go new file mode 100644 index 0000000..e027675 --- /dev/null +++ b/vendor/github.com/segmentio/conf/terminal_windows.go @@ -0,0 +1,48 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package conf + +import ( + "syscall" + "unsafe" +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") +) + +type ( + short int16 + word uint16 + + coord struct { + x short + y short + } + smallRect struct { + left short + top short + right short + bottom short + } + consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord + } +) + +// isTerminal returns true if the given file descriptor is a terminal. 
+func isTerminal(fd int) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} diff --git a/vendor/github.com/segmentio/go-snakecase/Readme.md b/vendor/github.com/segmentio/go-snakecase/Readme.md new file mode 100644 index 0000000..f96b192 --- /dev/null +++ b/vendor/github.com/segmentio/go-snakecase/Readme.md @@ -0,0 +1,10 @@ + +# go-snakecase + + [![Build Status](https://travis-ci.org/segmentio/go-snakecase.svg?branch=master)](https://travis-ci.org/segmentio/go-snakecase) + + Fast snakecase implementation, believe it or not this was a large bottleneck in our application, Go's regexps are very slow. + +# License + + MIT \ No newline at end of file diff --git a/vendor/github.com/segmentio/go-snakecase/snake.go b/vendor/github.com/segmentio/go-snakecase/snake.go new file mode 100644 index 0000000..03fe0d4 --- /dev/null +++ b/vendor/github.com/segmentio/go-snakecase/snake.go @@ -0,0 +1,81 @@ +// +// Fast snake-case implementation. +// +package snakecase + +// Snakecase the given string. 
+func Snakecase(s string) string { + b := make([]byte, 0, 64) + l := len(s) + i := 0 + + // loop until we reached the end of the string + for i < l { + + // skip leading bytes that aren't letters or numbers + for i < l && !isWord(s[i]) { + i++ + } + + if i < l && len(b) != 0 { + b = append(b, '_') + } + + // Append all leading uppercase or digits + for i < l { + if c := s[i]; !isHead(c) { + break + } else { + b = append(b, toLower(c)) + } + i++ + } + + // Append all trailing lowercase or digits + for i < l { + if c := s[i]; !isTail(c) { + break + } else { + b = append(b, c) + } + i++ + } + } + + return string(b) +} + +func isHead(c byte) bool { + return isUpper(c) || isDigit(c) +} + +func isTail(c byte) bool { + return isLower(c) || isDigit(c) +} + +func isWord(c byte) bool { + return isLetter(c) || isDigit(c) +} + +func isLetter(c byte) bool { + return isLower(c) || isUpper(c) +} + +func isUpper(c byte) bool { + return c >= 'A' && c <= 'Z' +} + +func isLower(c byte) bool { + return c >= 'a' && c <= 'z' +} + +func isDigit(c byte) bool { + return c >= '0' && c <= '9' +} + +func toLower(c byte) byte { + if isUpper(c) { + return c + ('a' - 'A') + } + return c +} diff --git a/vendor/github.com/segmentio/objconv/LICENSE b/vendor/github.com/segmentio/objconv/LICENSE new file mode 100644 index 0000000..9ba2b78 --- /dev/null +++ b/vendor/github.com/segmentio/objconv/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 Segment + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial 
portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/segmentio/objconv/README.md b/vendor/github.com/segmentio/objconv/README.md new file mode 100644 index 0000000..411cde1 --- /dev/null +++ b/vendor/github.com/segmentio/objconv/README.md @@ -0,0 +1,204 @@ +objconv [![CircleCI](https://circleci.com/gh/segmentio/objconv.svg?style=shield)](https://circleci.com/gh/segmentio/objconv) [![Go Report Card](https://goreportcard.com/badge/github.com/segmentio/objconv)](https://goreportcard.com/report/github.com/segmentio/objconv) [![GoDoc](https://godoc.org/github.com/segmentio/objconv?status.svg)](https://godoc.org/github.com/segmentio/objconv) +======= + +This Go package provides the implementation of high performance encoder and +decoders for JSON-like object representations. + +The top-level package exposes the generic types and algorithms for encoding and +decoding values, while each sub-package implements the parser and emitters for +specific types. + +### Breaking changes introduced in [#18](https://github.com/segmentio/objconv/pull/18) + +The `Encoder` type used to have methods exposed to encode specific types for +optimization purposes. The generic `Encode` method has been optimized to make +those other methods obsolete and they were therefore removed. 
+ +Compatibility with the standard library +--------------------------------------- + +The sub-packages providing implementation for specific formats also expose APIs +that mirror those of the standard library to make it easy to integrate with the +objconv package. However there are a couple of differences that need to be taken +in consideration: + +- Encoder and Decoder types are not exposed in the objconv sub-packages, instead +the types from the top-level package are used. For example, variables declared +with the `json.Encoder` type would have to be replaced with `objconv.Encoder`. + +- Interfaces like `json.Marshaler` or `json.Unmarshaler` are not supported. +However the `encoding.TextMarshaler` and `encoding.TextUnmarshaler` interfaces +are. + +Encoder +------- + +The package exposes a generic encoder API that let's the program serialize +native values into various formats. + +Here's an example of how to serialize a structure to JSON: +```go +package main + +import ( + "os" + + "github.com/segmentio/objconv/json" +) + +func main() { + e := json.NewEncoder(os.Stdout) + e.Encode(struct{ Hello string }{"World"}) +} +``` +``` +$ go run ./example.go +{"Hello":"World"} +``` + +Note that this code is fully compatible with the standard `encoding/json` +package. + +Decoder +------- + +Here's an example of how to use a JSON decoder: +```go +package main + +import ( + "fmt" + "os" + + "github.com/segmentio/objconv/json" +) + +func main() { + v := struct{ Message string }{} + + d := json.NewDecoder(os.Stdin) + d.Decode(&v) + + fmt.Println(v.Message) +} +``` +``` +$ echo '{ "Message": "Hello World!" }' | go run ./example.go +Hello World! +``` + +Streaming +--------- + +One of the interesting features of the `objconv` package is the ability to read +and write streams of data. This has several advantages in terms of memory usage +and latency when passing data from service to service. +The package exposes the `StreamEncoder` and `StreamDecoder` types for this +purpose. 
+ +For example the JSON stream encoder and decoder can produce a JSON array as a +stream where data are produced and consumed on the fly as they become available, +here's an example: +```go +package main + +import ( + "io" + + "github.com/segmentio/objconv/json" +) + +func main() { + r, w := io.Pipe() + + go func() { + defer w.Close() + + e := json.NewStreamEncoder(w) + defer e.Close() + + // Produce values to the JSON stream. + for i := 0; i != 1000; i++ { + e.Encode(i) + } + }() + + d := json.NewStreamDecoder(r) + + // Consume values from the JSON stream. + var v int + + for d.Decode(&v) == nil { + // v => {0..999} + // ... + } +} +``` + +Stream decoders are capable of reading values from either arrays or single +values, this is very convenient when an program cannot predict the structure of +the stream. If the actual data representation is not an array the stream decoder +will simply behave like a normal decoder and produce a single value. + +Encoding and decoding custom types +---------------------------------- + +To override the default encoder and decoder behaviors a type may implement the +`ValueEncoder` or `ValueDecoder` interface. The method on these interfaces are +called to customize the default behavior. + +This can prove very useful to represent slice of pairs as maps for example: +```go +type KV struct { + K string + V interface{} +} + +type M []KV + +// Implement the ValueEncoder interface to provide a custom encoding. +func (m M) EncodeValue(e objconv.Encoder) error { + i := 0 + return e.EncodeMap(len(m), func(k objconv.Encoder, v objconv.Encoder) (err error) { + if err = k.Encode(m[i].K); err != nil { + return + } + if err = v.Encode(m[i].V); err != nil { + return + } + i++ + return + }) +} +``` + +Mime Types +---------- + +The `objconv` package exposes APIs for registering codecs for specific mime +types. When an objconv package for a specific format is imported +it registers itself on the global registry to be later referred by name. 
+ +```go +import ( + "bytes" + + "github.com/segmentio/objconv" + _ "github.com/segmentio/objconv/json" // registers the JSON codec +) + +func main() { + // Lookup the JSON codec. + jsonCodec, ok := objconv.Lookup("application/json") + + if !ok { + panic("unreachable") + } + + // Create a new encoder from the codec. + b := &bytes.Buffer{} + e := jsonCodec.NewEncoder(b) + + // ... +} +``` diff --git a/vendor/github.com/segmentio/objconv/adapter.go b/vendor/github.com/segmentio/objconv/adapter.go new file mode 100644 index 0000000..9e738ff --- /dev/null +++ b/vendor/github.com/segmentio/objconv/adapter.go @@ -0,0 +1,55 @@ +package objconv + +import ( + "reflect" + "sync" +) + +// An Adapter is a pair of an encoder and a decoder function that can be +// installed on the package to support new types. +type Adapter struct { + Encode func(Encoder, reflect.Value) error + Decode func(Decoder, reflect.Value) error +} + +// Install adds an adapter for typ. +// +// The function panics if one of the encoder and decoder functions of the +// adapter are nil. +// +// A typical use case for this function is to be called during the package +// initialization phase to extend objconv support for new types. +func Install(typ reflect.Type, adapter Adapter) { + if adapter.Encode == nil { + panic("objconv: the encoder function of an adapter cannot be nil") + } + + if adapter.Decode == nil { + panic("objconv: the decoder function of an adapter cannot be nil") + } + + adapterMutex.Lock() + adapterStore[typ] = adapter + adapterMutex.Unlock() + + // We have to clear the struct cache because it may now have become invalid. + // Because installing adapters is done in the package initialization phase + // it's unlikely that any encoding or decoding operations are taking place + // at this time so there should be no performance impact of clearing the + // cache. + structCache.clear() +} + +// AdapterOf returns the adapter for typ, setting ok to true if one was found, +// false otherwise. 
+func AdapterOf(typ reflect.Type) (a Adapter, ok bool) { + adapterMutex.RLock() + a, ok = adapterStore[typ] + adapterMutex.RUnlock() + return +} + +var ( + adapterMutex sync.RWMutex + adapterStore = make(map[reflect.Type]Adapter) +) diff --git a/vendor/github.com/segmentio/objconv/adapters/doc.go b/vendor/github.com/segmentio/objconv/adapters/doc.go new file mode 100644 index 0000000..3dac44f --- /dev/null +++ b/vendor/github.com/segmentio/objconv/adapters/doc.go @@ -0,0 +1,12 @@ +// Package adapters installs all adapters from its subpackages into the objconv +// package. +// +// This package exposes no functions or types and is solely useful for the side +// effect of setting up extra adapters on the objconv package on initialization. +package adapters + +import ( + _ "github.com/segmentio/objconv/adapters/net" + _ "github.com/segmentio/objconv/adapters/net/mail" + _ "github.com/segmentio/objconv/adapters/net/url" +) diff --git a/vendor/github.com/segmentio/objconv/adapters/net/decode.go b/vendor/github.com/segmentio/objconv/adapters/net/decode.go new file mode 100644 index 0000000..aee42d5 --- /dev/null +++ b/vendor/github.com/segmentio/objconv/adapters/net/decode.go @@ -0,0 +1,137 @@ +package net + +import ( + "errors" + "net" + "reflect" + "strconv" + "strings" + + "github.com/segmentio/objconv" +) + +func decodeTCPAddr(d objconv.Decoder, to reflect.Value) (err error) { + var a net.TCPAddr + var s string + + if err = d.Decode(&s); err != nil { + return + } + + if a.IP, a.Port, a.Zone, err = parseNetAddr(s); err != nil { + return + } + + if to.IsValid() { + to.Set(reflect.ValueOf(a)) + } + return +} + +func decodeUDPAddr(d objconv.Decoder, to reflect.Value) (err error) { + var a net.UDPAddr + var s string + + if err = d.Decode(&s); err != nil { + return + } + + if a.IP, a.Port, a.Zone, err = parseNetAddr(s); err != nil { + return + } + + if to.IsValid() { + to.Set(reflect.ValueOf(a)) + } + return +} + +func decodeUnixAddr(d objconv.Decoder, to reflect.Value) 
(err error) { + var a net.UnixAddr + var s string + + if err = d.Decode(&s); err != nil { + return + } + + if i := strings.Index(s, "://"); i >= 0 { + a.Net, a.Name = s[:i], s[i+3:] + } else { + a.Net, a.Name = "unix", s + } + + if to.IsValid() { + to.Set(reflect.ValueOf(a)) + } + return +} + +func decodeIPAddr(d objconv.Decoder, to reflect.Value) (err error) { + var a net.IPAddr + var s string + + if err = d.Decode(&s); err != nil { + return + } + + if i := strings.IndexByte(s, '%'); i >= 0 { + s, a.Zone = s[:i], s[i+1:] + } + + if a.IP = net.ParseIP(s); a.IP == nil { + err = errors.New("objconv: bad IP address: " + s) + return + } + + if to.IsValid() { + to.Set(reflect.ValueOf(a)) + } + return +} + +func decodeIP(d objconv.Decoder, to reflect.Value) (err error) { + var ip net.IP + var s string + + if err = d.Decode(&s); err != nil { + return + } + + if ip = net.ParseIP(s); ip == nil { + err = errors.New("objconv: bad IP address: " + s) + return + } + + if to.IsValid() { + to.Set(reflect.ValueOf(ip)) + } + return +} + +func parseNetAddr(s string) (ip net.IP, port int, zone string, err error) { + var h string + var p string + + if h, p, err = net.SplitHostPort(s); err != nil { + h, p = s, "" + } + + if len(h) != 0 { + if off := strings.IndexByte(h, '%'); off >= 0 { + h, zone = h[:off], h[off+1:] + } + if ip = net.ParseIP(h); ip == nil { + err = errors.New("objconv: bad IP address: " + s) + return + } + } + + if len(p) != 0 { + if port, err = strconv.Atoi(p); err != nil || port < 0 || port > 65535 { + err = errors.New("objconv: bad port number: " + s) + return + } + } + + return +} diff --git a/vendor/github.com/segmentio/objconv/adapters/net/doc.go b/vendor/github.com/segmentio/objconv/adapters/net/doc.go new file mode 100644 index 0000000..8873861 --- /dev/null +++ b/vendor/github.com/segmentio/objconv/adapters/net/doc.go @@ -0,0 +1,5 @@ +// Package net provides adapters for types in the standard net package. 
+// +// The types and functions in this package aren't usually used direction and +// instead are used implicitly by installing adapters on objconv. +package net diff --git a/vendor/github.com/segmentio/objconv/adapters/net/encode.go b/vendor/github.com/segmentio/objconv/adapters/net/encode.go new file mode 100644 index 0000000..f820e3b --- /dev/null +++ b/vendor/github.com/segmentio/objconv/adapters/net/encode.go @@ -0,0 +1,33 @@ +package net + +import ( + "net" + "reflect" + + "github.com/segmentio/objconv" +) + +func encodeTCPAddr(e objconv.Encoder, v reflect.Value) error { + a := v.Interface().(net.TCPAddr) + return e.Encode(a.String()) +} + +func encodeUDPAddr(e objconv.Encoder, v reflect.Value) error { + a := v.Interface().(net.UDPAddr) + return e.Encode(a.String()) +} + +func encodeUnixAddr(e objconv.Encoder, v reflect.Value) error { + a := v.Interface().(net.UnixAddr) + return e.Encode(a.String()) +} + +func encodeIPAddr(e objconv.Encoder, v reflect.Value) error { + a := v.Interface().(net.IPAddr) + return e.Encode(a.String()) +} + +func encodeIP(e objconv.Encoder, v reflect.Value) error { + a := v.Interface().(net.IP) + return e.Encode(a.String()) +} diff --git a/vendor/github.com/segmentio/objconv/adapters/net/init.go b/vendor/github.com/segmentio/objconv/adapters/net/init.go new file mode 100644 index 0000000..547abf6 --- /dev/null +++ b/vendor/github.com/segmentio/objconv/adapters/net/init.go @@ -0,0 +1,56 @@ +package net + +import ( + "net" + "reflect" + + "github.com/segmentio/objconv" +) + +func init() { + objconv.Install(reflect.TypeOf(net.TCPAddr{}), TCPAddrAdapter()) + objconv.Install(reflect.TypeOf(net.UDPAddr{}), UDPAddrAdapter()) + objconv.Install(reflect.TypeOf(net.UnixAddr{}), UnixAddrAdapter()) + objconv.Install(reflect.TypeOf(net.IPAddr{}), IPAddrAdapter()) + objconv.Install(reflect.TypeOf(net.IP(nil)), IPAdapter()) +} + +// TCPAddrAdapter returns the adapter to encode and decode net.TCPAddr values. 
+func TCPAddrAdapter() objconv.Adapter { + return objconv.Adapter{ + Encode: encodeTCPAddr, + Decode: decodeTCPAddr, + } +} + +// UDPAddrAdapter returns the adapter to encode and decode net.UDPAddr values. +func UDPAddrAdapter() objconv.Adapter { + return objconv.Adapter{ + Encode: encodeUDPAddr, + Decode: decodeUDPAddr, + } +} + +// UnixAddrAdapter returns the adapter to encode and decode net.UnixAddr values. +func UnixAddrAdapter() objconv.Adapter { + return objconv.Adapter{ + Encode: encodeUnixAddr, + Decode: decodeUnixAddr, + } +} + +// IPAddrAdapter returns the adapter to encode and decode net.IPAddr values. +func IPAddrAdapter() objconv.Adapter { + return objconv.Adapter{ + Encode: encodeIPAddr, + Decode: decodeIPAddr, + } +} + +// IPAdapter returns the adapter to encode and decode net.IP values. +func IPAdapter() objconv.Adapter { + return objconv.Adapter{ + Encode: encodeIP, + Decode: decodeIP, + } +} diff --git a/vendor/github.com/segmentio/objconv/adapters/net/mail/decode.go b/vendor/github.com/segmentio/objconv/adapters/net/mail/decode.go new file mode 100644 index 0000000..1f446a5 --- /dev/null +++ b/vendor/github.com/segmentio/objconv/adapters/net/mail/decode.go @@ -0,0 +1,47 @@ +package mail + +import ( + "errors" + "net/mail" + "reflect" + + "github.com/segmentio/objconv" +) + +func decodeAddress(d objconv.Decoder, to reflect.Value) (err error) { + var a *mail.Address + var s string + + if err = d.Decode(&s); err != nil { + return + } + + if a, err = mail.ParseAddress(s); err != nil { + err = errors.New("objconv: bad email address: " + err.Error()) + return + } + + if to.IsValid() { + to.Set(reflect.ValueOf(*a)) + } + return +} + +func decodeAddressList(d objconv.Decoder, to reflect.Value) (err error) { + var l []*mail.Address + var s string + + if err = d.Decode(&s); err != nil { + return + } + + if l, err = mail.ParseAddressList(s); err != nil { + err = errors.New("objconv: bad email address list: " + err.Error()) + return + } + + if to.IsValid() { 
// The types and functions in this package aren't usually used directly and
// instead are used implicitly by installing adapters on objconv.
values. +func AddressAdapter() objconv.Adapter { + return objconv.Adapter{ + Encode: encodeAddress, + Decode: decodeAddress, + } +} + +// AddressListAdapter returns the adapter to encode and decode []*mail.Address +// values. +// +// The adapter uses a string representation of the mail address list, in cases +// where the serialized form has to be an actual array of strings the program +// should use []mail.Address (no pointers). +func AddressListAdapter() objconv.Adapter { + return objconv.Adapter{ + Encode: encodeAddressList, + Decode: decodeAddressList, + } +} diff --git a/vendor/github.com/segmentio/objconv/adapters/net/url/decode.go b/vendor/github.com/segmentio/objconv/adapters/net/url/decode.go new file mode 100644 index 0000000..59ce859 --- /dev/null +++ b/vendor/github.com/segmentio/objconv/adapters/net/url/decode.go @@ -0,0 +1,47 @@ +package url + +import ( + "errors" + "net/url" + "reflect" + + "github.com/segmentio/objconv" +) + +func decodeURL(d objconv.Decoder, to reflect.Value) (err error) { + var u *url.URL + var s string + + if err = d.Decode(&s); err != nil { + return + } + + if u, err = url.Parse(s); err != nil { + err = errors.New("objconv: bad URL: " + err.Error()) + return + } + + if to.IsValid() { + to.Set(reflect.ValueOf(*u)) + } + return +} + +func decodeQuery(d objconv.Decoder, to reflect.Value) (err error) { + var v url.Values + var s string + + if err = d.Decode(&s); err != nil { + + } + + if v, err = url.ParseQuery(s); err != nil { + err = errors.New("objconv: bad URL values: " + err.Error()) + return + } + + if to.IsValid() { + to.Set(reflect.ValueOf(v)) + } + return +} diff --git a/vendor/github.com/segmentio/objconv/adapters/net/url/doc.go b/vendor/github.com/segmentio/objconv/adapters/net/url/doc.go new file mode 100644 index 0000000..9c9c5d3 --- /dev/null +++ b/vendor/github.com/segmentio/objconv/adapters/net/url/doc.go @@ -0,0 +1,5 @@ +// Package url provides adapters for types in the standard net/url package. 
// The types and functions in this package aren't usually used directly and
// instead are used implicitly by installing adapters on objconv.
+func QueryAdapter() objconv.Adapter { + return objconv.Adapter{ + Encode: encodeQuery, + Decode: decodeQuery, + } +} diff --git a/vendor/github.com/segmentio/objconv/codec.go b/vendor/github.com/segmentio/objconv/codec.go new file mode 100644 index 0000000..e5903fc --- /dev/null +++ b/vendor/github.com/segmentio/objconv/codec.go @@ -0,0 +1,104 @@ +package objconv + +import ( + "io" + "sync" +) + +// A Codec is a factory for encoder and decoders that work on byte streams. +type Codec struct { + NewEmitter func(io.Writer) Emitter + NewParser func(io.Reader) Parser +} + +// NewEncoder returns a new encoder that outputs to w. +func (c Codec) NewEncoder(w io.Writer) *Encoder { + return NewEncoder(c.NewEmitter(w)) +} + +// NewDecoder returns a new decoder that takes input from r. +func (c Codec) NewDecoder(r io.Reader) *Decoder { + return NewDecoder(c.NewParser(r)) +} + +// NewStreamEncoder returns a new stream encoder that outputs to w. +func (c Codec) NewStreamEncoder(w io.Writer) *StreamEncoder { + return NewStreamEncoder(c.NewEmitter(w)) +} + +// NewStreamDecoder returns a new stream decoder that takes input from r. +func (c Codec) NewStreamDecoder(r io.Reader) *StreamDecoder { + return NewStreamDecoder(c.NewParser(r)) +} + +// A Registry associates mime types to codecs. +// +// It is safe to use a registry concurrently from multiple goroutines. +type Registry struct { + mutex sync.RWMutex + codecs map[string]Codec +} + +// Register adds a codec for a mimetype to r. +func (reg *Registry) Register(mimetype string, codec Codec) { + defer reg.mutex.Unlock() + reg.mutex.Lock() + + if reg.codecs == nil { + reg.codecs = make(map[string]Codec) + } + + reg.codecs[mimetype] = codec +} + +// Unregister removes the codec for a mimetype from r. 
+func (reg *Registry) Unregister(mimetype string) { + defer reg.mutex.Unlock() + reg.mutex.Lock() + + delete(reg.codecs, mimetype) +} + +// Lookup returns the codec associated with mimetype, ok is set to true or false +// based on whether a codec was found. +func (reg *Registry) Lookup(mimetype string) (codec Codec, ok bool) { + reg.mutex.RLock() + codec, ok = reg.codecs[mimetype] + reg.mutex.RUnlock() + return +} + +// Codecs returns a map of all codecs registered in reg. +func (reg *Registry) Codecs() (codecs map[string]Codec) { + codecs = make(map[string]Codec) + reg.mutex.RLock() + for mimetype, codec := range reg.codecs { + codecs[mimetype] = codec + } + reg.mutex.RUnlock() + return +} + +// The global registry to which packages add their codecs. +var registry Registry + +// Register adds a codec for a mimetype to the global registry. +func Register(mimetype string, codec Codec) { + registry.Register(mimetype, codec) +} + +// Unregister removes the codec for a mimetype from the global registry. +func Unregister(mimetype string) { + registry.Unregister(mimetype) +} + +// Lookup returns the codec associated with mimetype, ok is set to true or false +// based on whether a codec was found. +func Lookup(mimetype string) (Codec, bool) { + return registry.Lookup(mimetype) +} + +// Codecs returns a map of all codecs registered in the global registry. +func Codecs() map[string]Codec { + return registry.Codecs() +} diff --git a/vendor/github.com/segmentio/objconv/decode.go b/vendor/github.com/segmentio/objconv/decode.go new file mode 100644 index 0000000..c86091d --- /dev/null +++ b/vendor/github.com/segmentio/objconv/decode.go @@ -0,0 +1,1602 @@ +package objconv + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "time" + "unsafe" + + "github.com/segmentio/objconv/objutil" +) + +// A Decoder implements the algorithms for building data structures from their +// serialized forms. +// +// Decoders are not safe for use by multiple goroutines. 
	// MapType is used to override the type of maps produced by the decoder when
	// there is no destination type (when decoding to an empty interface).
+ to = to.Elem() + } + + _, err := d.decode(to) + return err +} + +func (d Decoder) decode(to reflect.Value) (Type, error) { + return decodeFuncOf(to.Type())(d, to) +} + +func (d Decoder) decodeBool(to reflect.Value) (t Type, err error) { + if t, err = d.Parser.ParseType(); err == nil { + err = d.decodeBoolFromType(t, to) + } + return +} + +func (d Decoder) decodeBoolFromType(t Type, to reflect.Value) (err error) { + var v bool + + switch t { + case Nil: + err = d.Parser.ParseNil() + + case Bool: + v, err = d.Parser.ParseBool() + + default: + err = typeConversionError(t, Bool) + } + + if err != nil { + return + } + + if to.IsValid() { + to.SetBool(v) + } + return +} + +func (d Decoder) decodeInt(to reflect.Value) (t Type, err error) { + if t, err = d.Parser.ParseType(); err == nil { + err = d.decodeIntFromType(t, to) + } + return +} + +func (d Decoder) decodeIntFromType(t Type, to reflect.Value) (err error) { + var valid = to.IsValid() + var i int64 + var u uint64 + + switch t { + case Nil: + err = d.Parser.ParseNil() + + case Int: + if i, err = d.Parser.ParseInt(); err != nil { + return + } + + if valid { + switch t := to.Type(); t.Kind() { + case reflect.Int: + err = objutil.CheckInt64Bounds(i, int64(objutil.IntMin), uint64(objutil.IntMax), t) + case reflect.Int8: + err = objutil.CheckInt64Bounds(i, objutil.Int8Min, objutil.Int8Max, t) + case reflect.Int16: + err = objutil.CheckInt64Bounds(i, objutil.Int16Min, objutil.Int16Max, t) + case reflect.Int32: + err = objutil.CheckInt64Bounds(i, objutil.Int32Min, objutil.Int32Max, t) + } + } + + case Uint: + if u, err = d.Parser.ParseUint(); err != nil { + return + } + + if valid { + switch t := to.Type(); t.Kind() { + case reflect.Int: + err = objutil.CheckUint64Bounds(u, uint64(objutil.IntMax), t) + case reflect.Int8: + err = objutil.CheckUint64Bounds(u, objutil.Int8Max, t) + case reflect.Int16: + err = objutil.CheckUint64Bounds(u, objutil.Int16Max, t) + case reflect.Int32: + err = objutil.CheckUint64Bounds(u, 
objutil.Int32Max, t) + case reflect.Int64: + err = objutil.CheckUint64Bounds(u, objutil.Int64Max, t) + } + } + + i = int64(u) + + case String: + var b []byte + + if b, err = d.Parser.ParseString(); err != nil { + return + } + + i, err = strconv.ParseInt(unsafeString(b), 10, 64) + // if an error is received, reparse with a "safe" string in case it is retained in the error + if err != nil { + _, err = strconv.ParseInt(string(b), 10, 64) + } + + case Bytes: + var b []byte + + if b, err = d.Parser.ParseBytes(); err != nil { + return + } + + i, err = strconv.ParseInt(unsafeString(b), 10, 64) + // if an error is received, reparse with a "safe" string in case it is retained in the error + if err != nil { + _, err = strconv.ParseInt(string(b), 10, 64) + } + + default: + err = typeConversionError(t, Int) + } + + if err != nil { + return + } + + if valid { + to.SetInt(i) + } + return +} + +func (d Decoder) decodeUint(to reflect.Value) (t Type, err error) { + if t, err = d.Parser.ParseType(); err == nil { + err = d.decodeUintFromType(t, to) + } + return +} + +func (d Decoder) decodeUintFromType(t Type, to reflect.Value) (err error) { + var valid = to.IsValid() + var i int64 + var u uint64 + + switch t { + case Nil: + err = d.Parser.ParseNil() + + case Int: + if i, err = d.Parser.ParseInt(); err != nil { + return + } + + if valid { + switch t := to.Type(); t.Kind() { + case reflect.Uint: + err = objutil.CheckInt64Bounds(i, 0, uint64(objutil.UintMax), t) + case reflect.Uint8: + err = objutil.CheckInt64Bounds(i, 0, objutil.Uint8Max, t) + case reflect.Uint16: + err = objutil.CheckInt64Bounds(i, 0, objutil.Uint16Max, t) + case reflect.Uint32: + err = objutil.CheckInt64Bounds(i, 0, objutil.Uint32Max, t) + case reflect.Uint64: + err = objutil.CheckInt64Bounds(i, 0, objutil.Uint64Max, t) + } + } + + u = uint64(i) + + case Uint: + if u, err = d.Parser.ParseUint(); err != nil { + return + } + + if valid { + switch t := to.Type(); t.Kind() { + case reflect.Uint: + err = 
objutil.CheckUint64Bounds(u, uint64(objutil.UintMax), t) + case reflect.Uint8: + err = objutil.CheckUint64Bounds(u, objutil.Uint8Max, t) + case reflect.Uint16: + err = objutil.CheckUint64Bounds(u, objutil.Uint16Max, t) + case reflect.Uint32: + err = objutil.CheckUint64Bounds(u, objutil.Uint32Max, t) + } + } + + case String: + var b []byte + + if b, err = d.Parser.ParseString(); err != nil { + return + } + + u, err = strconv.ParseUint(unsafeString(b), 10, 64) + // if an error is received, reparse with a "safe" string in case it is retained in the error + if err != nil { + _, err = strconv.ParseUint(string(b), 10, 64) + } + + case Bytes: + var b []byte + + if b, err = d.Parser.ParseBytes(); err != nil { + return + } + + u, err = strconv.ParseUint(unsafeString(b), 10, 64) + // if an error is received, reparse with a "safe" string in case it is retained in the error + if err != nil { + _, err = strconv.ParseUint(string(b), 10, 64) + } + + default: + err = typeConversionError(t, Uint) + } + + if err != nil { + return + } + + if valid { + to.SetUint(u) + } + return +} + +func (d Decoder) decodeFloat(to reflect.Value) (t Type, err error) { + if t, err = d.Parser.ParseType(); err == nil { + err = d.decodeFloatFromType(t, to) + } + return +} + +func (d Decoder) decodeFloatFromType(t Type, to reflect.Value) (err error) { + var i int64 + var u uint64 + var f float64 + + switch t { + case Nil: + err = d.Parser.ParseNil() + + case Int: + if i, err = d.Parser.ParseInt(); err == nil { + if err = objutil.CheckInt64Bounds(i, objutil.Float64IntMin, objutil.Float64IntMax, int64Type); err == nil { + f = float64(i) + } + } + + case Uint: + if u, err = d.Parser.ParseUint(); err == nil { + if err = objutil.CheckUint64Bounds(u, objutil.Float64IntMax, uint64Type); err == nil { + f = float64(u) + } + } + + case Float: + f, err = d.Parser.ParseFloat() + + case String: + var b []byte + + if b, err = d.Parser.ParseString(); err != nil { + return + } + + f, err = 
strconv.ParseFloat(unsafeString(b), 64) + // if an error is received, reparse with a "safe" string in case it is retained in the error + if err != nil { + _, err = strconv.ParseFloat(string(b), 64) + } + + case Bytes: + var b []byte + + if b, err = d.Parser.ParseBytes(); err != nil { + return + } + + f, err = strconv.ParseFloat(unsafeString(b), 64) + // if an error is received, reparse with a "safe" string in case it is retained in the error + if err != nil { + _, err = strconv.ParseFloat(string(b), 64) + } + + default: + err = typeConversionError(t, Float) + } + + if err != nil { + return + } + + if to.IsValid() { + to.SetFloat(f) + } + return +} + +func (d Decoder) decodeString(to reflect.Value) (t Type, err error) { + if t, err = d.Parser.ParseType(); err == nil { + err = d.decodeStringFromType(t, to) + } + return +} + +func (d Decoder) decodeStringFromType(t Type, to reflect.Value) (err error) { + var a [64]byte + var b []byte + + switch t { + case Nil: + err = d.Parser.ParseNil() + + case String: + b, err = d.Parser.ParseString() + + case Bytes: + b, err = d.Parser.ParseBytes() + + case Bool: + var v bool + if v, err = d.Parser.ParseBool(); err == nil { + if v { + b = append(a[:0], "true"...) + } else { + b = append(a[:0], "false"...) 
+ } + } + + case Int: + var v int64 + if v, err = d.Parser.ParseInt(); err == nil { + b = strconv.AppendInt(a[:0], v, 10) + } + + case Uint: + var v uint64 + if v, err = d.Parser.ParseUint(); err == nil { + b = strconv.AppendUint(a[:0], v, 10) + } + + case Float: + var v float64 + if v, err = d.Parser.ParseFloat(); err == nil { + b = strconv.AppendFloat(a[:0], v, 'g', -1, 64) + } + + case Time: + var v time.Time + if v, err = d.Parser.ParseTime(); err == nil { + b = v.AppendFormat(a[:0], time.RFC3339Nano) + } + + case Duration: + var v time.Duration + if v, err = d.Parser.ParseDuration(); err == nil { + b = objutil.AppendDuration(a[:0], v) + } + + case Error: + var v error + if v, err = d.Parser.ParseError(); err == nil { + b = append(a[:0], v.Error()...) + } + + default: + err = typeConversionError(t, String) + } + + if err != nil { + return + } + + if to.IsValid() { + to.SetString(string(b)) + } + return +} + +func (d Decoder) decodeBytes(to reflect.Value) (t Type, err error) { + if t, err = d.Parser.ParseType(); err == nil { + err = d.decodeBytesFromType(t, to) + } + return +} + +func (d Decoder) decodeBytesFromType(t Type, to reflect.Value) (err error) { + var b []byte + + switch t { + case Nil: + err = d.Parser.ParseNil() + + case String: + b, err = d.Parser.ParseString() + + case Bytes: + b, err = d.Parser.ParseBytes() + + default: + err = typeConversionError(t, String) + } + + if err != nil { + return + } + + if bd, ok := d.Parser.(bytesDecoder); ok { + if b, err = bd.DecodeBytes(b); err != nil { + return + } + } + + if to.IsValid() { + if t == Nil { + to.SetBytes(nil) + } else { + v := make([]byte, len(b)) + copy(v, b) + to.SetBytes(v) + } + } + return +} + +func (d Decoder) decodeTime(to reflect.Value) (t Type, err error) { + if t, err = d.Parser.ParseType(); err == nil { + err = d.decodeTimeFromType(t, to) + } + return +} + +func (d Decoder) decodeTimeFromType(t Type, to reflect.Value) (err error) { + var s []byte + var v time.Time + + switch t { + case 
Nil: + err = d.Parser.ParseNil() + + case String: + s, err = d.Parser.ParseString() + + case Bytes: + s, err = d.Parser.ParseBytes() + + case Time: + v, err = d.Parser.ParseTime() + } + + if err != nil { + return + } + + if to.IsValid() { + if t == String || t == Bytes { + v, err = time.Parse(time.RFC3339Nano, unsafeString(s)) + // if an error is received, reparse with a "safe" string in case it is retained in the error + if err != nil { + _, err = time.Parse(time.RFC3339Nano, string(t)) + } + } + *(to.Addr().Interface().(*time.Time)) = v + } + return +} + +func (d Decoder) decodeDuration(to reflect.Value) (t Type, err error) { + if t, err = d.Parser.ParseType(); err == nil { + err = d.decodeDurationFromType(t, to) + } + return +} + +func (d Decoder) decodeDurationFromType(t Type, to reflect.Value) (err error) { + var s []byte + var v time.Duration + + switch t { + case Nil: + err = d.Parser.ParseNil() + + case String: + s, err = d.Parser.ParseString() + + case Bytes: + s, err = d.Parser.ParseBytes() + + case Duration: + v, err = d.Parser.ParseDuration() + } + + if err != nil { + return + } + + if t == String || t == Bytes { + v, err = time.ParseDuration(unsafeString(s)) + // if an error is received, reparse with a "safe" string in case it is retained in the error + if err != nil { + _, err = time.ParseDuration(string(s)) + } + } + + if to.IsValid() { + to.SetInt(int64(v)) + } + return +} + +func (d Decoder) decodeError(to reflect.Value) (t Type, err error) { + if t, err = d.Parser.ParseType(); err == nil { + err = d.decodeErrorFromType(t, to) + } + return +} + +func (d Decoder) decodeErrorFromType(t Type, to reflect.Value) (err error) { + var s []byte + var v error + + switch t { + case Nil: + err = d.Parser.ParseNil() + + case String: + s, err = d.Parser.ParseString() + + case Bytes: + s, err = d.Parser.ParseBytes() + + case Error: + v, err = d.Parser.ParseError() + } + + if err != nil { + return + } + + if to.IsValid() { + if t == String || t == Bytes { + v = 
errors.New(string(s)) + } + to.Set(reflect.ValueOf(v)) + } + return +} + +func (d Decoder) decodeSlice(to reflect.Value) (t Type, err error) { + return d.decodeSliceWith(to, decodeFuncOf(to.Type().Elem())) +} + +func (d Decoder) decodeSliceWith(to reflect.Value, f decodeFunc) (t Type, err error) { + if t, err = d.Parser.ParseType(); err == nil { + err = d.decodeSliceFromTypeWith(t, to, f) + } + return +} + +func (d Decoder) decodeSliceFromType(typ Type, to reflect.Value) (err error) { + f := Decoder.decodeInterface + if to.IsValid() { + f = decodeFuncOf(to.Type().Elem()) + } + return d.decodeSliceFromTypeWith(typ, to, f) +} + +func (d Decoder) decodeSliceFromTypeWith(typ Type, to reflect.Value, f decodeFunc) (err error) { + if !to.IsValid() { + return d.decodeArrayImpl(typ, func(d Decoder) (err error) { + _, err = f(d, reflect.Value{}) + return + }) + } + + t := to.Type() + s := reflect.MakeSlice(t, 0, 0) + i := 0 + n := 0 + + if err = d.decodeArrayImpl(typ, func(d Decoder) (err error) { + if i == n { + if n *= 5; n == 0 { + n = 10 + } + sc := reflect.MakeSlice(t, n, n) + reflect.Copy(sc, s) + s = sc + } + if _, err = f(d, s.Index(i)); err != nil { + return + } + i++ + return + }); err != nil { + return + } + + if typ == Nil { + to.Set(zeroValueOf(t)) + } else { + if i != n { + s = s.Slice(0, i) + } + to.Set(s) + } + return +} + +func (d Decoder) decodeArray(to reflect.Value) (t Type, err error) { + return d.decodeArrayWith(to, decodeFuncOf(to.Type().Elem())) +} + +func (d Decoder) decodeArrayWith(to reflect.Value, f decodeFunc) (t Type, err error) { + if t, err = d.Parser.ParseType(); err == nil { + err = d.decodeArrayFromTypeWith(t, to, f) + } + return +} + +func (d Decoder) decodeArrayFromTypeWith(typ Type, to reflect.Value, f decodeFunc) (err error) { + n := to.Len() // len(to) + t := to.Type() // [...]T + e := t.Elem() // T + z := zeroValueOf(e) // T{} + + for i := 0; i != n; i++ { + to.Index(i).Set(z) // reset to the zero-value + } + + i := 0 + + if err = 
d.decodeArrayImpl(typ, func(d Decoder) (err error) { + if i < n { + if _, err = f(d, to.Index(i)); err != nil { + return + } + } + i++ + return + }); err != nil { + return + } + + if typ == Nil { + to.Set(zeroValueOf(t)) + } else if i != n { + err = fmt.Errorf("objconv: array length mismatch, expected %d but only %d elements were decoded", n, i) + } + + return +} + +func (d Decoder) decodeMap(to reflect.Value) (Type, error) { + t := to.Type() + return d.decodeMapWith(to, decodeFuncOf(t.Key()), decodeFuncOf(t.Elem())) +} + +func (d Decoder) decodeMapWith(to reflect.Value, kf decodeFunc, vf decodeFunc) (t Type, err error) { + if t, err = d.Parser.ParseType(); err == nil { + err = d.decodeMapFromTypeWith(t, to, kf, vf) + } + return +} + +func (d Decoder) decodeMapFromType(typ Type, to reflect.Value) (err error) { + kf := Decoder.decodeInterface + vf := Decoder.decodeInterface + if to.IsValid() { + t := to.Type() + kf = decodeFuncOf(t.Key()) + vf = decodeFuncOf(t.Elem()) + } + return d.decodeMapFromTypeWith(typ, to, kf, vf) +} + +func (d Decoder) decodeMapFromTypeWith(typ Type, to reflect.Value, kf decodeFunc, vf decodeFunc) (err error) { + if !to.IsValid() { + return d.decodeMapImpl(typ, func(kd Decoder, vd Decoder) (err error) { + if _, err = d.decodeInterface(reflect.Value{}); err != nil { + return + } + if err = d.Parser.ParseMapValue(vd.off - 1); err != nil { + return + } + _, err = d.decodeInterface(reflect.Value{}) + return + }) + } + + t := to.Type() // map[K]V + + switch t { + case mapInterfaceInterfaceType: + return d.decodeMapInterfaceInterface(typ, to) + + case mapStringInterfaceType: + return d.decodeMapStringInterface(typ, to) + + case mapStringStringType: + return d.decodeMapStringString(typ, to) + } + + m := reflect.MakeMap(t) // make(map[K]V) + + kt := t.Key() // K + kz := zeroValueOf(kt) // K{} + kv := reflect.New(kt).Elem() // &K{} + + vt := t.Elem() // V + vz := zeroValueOf(vt) // V{} + vv := reflect.New(vt).Elem() // &V{} + + if err = 
d.decodeMapImpl(typ, func(kd Decoder, vd Decoder) (err error) { + kv.Set(kz) // reset the key to its zero-value + vv.Set(vz) // reset the value to its zero-value + if _, err = kf(d, kv); err != nil { + return + } + if err = d.Parser.ParseMapValue(vd.off - 1); err != nil { + return + } + if _, err = vf(d, vv); err != nil { + return + } + m.SetMapIndex(kv, vv) + return + }); err != nil { + return + } + + if typ == Nil { + to.Set(zeroValueOf(t)) + } else { + to.Set(m) + } + return +} + +func (d Decoder) decodeMapInterfaceInterface(typ Type, to reflect.Value) error { + m := to.Interface().(map[interface{}]interface{}) + + if m == nil { + m = make(map[interface{}]interface{}) + to.Set(reflect.ValueOf(m)) + } + + for k := range m { + delete(m, k) + } + + return d.decodeMapImpl(typ, func(kd Decoder, vd Decoder) (err error) { + var k interface{} + var v interface{} + + if err = kd.Decode(&k); err != nil { + return + } + if err = vd.Decode(&v); err != nil { + return + } + + m[k] = v + return + }) +} + +func (d Decoder) decodeMapStringInterface(typ Type, to reflect.Value) (err error) { + m := to.Interface().(map[string]interface{}) + + if m == nil { + m = make(map[string]interface{}) + to.Set(reflect.ValueOf(m)) + } + + for k := range m { + delete(m, k) + } + + return d.decodeMapImpl(typ, func(kd Decoder, vd Decoder) (err error) { + var b []byte + var k string + var v interface{} + + if _, b, err = d.decodeTypeAndString(); err != nil { + return + } + k = string(b) + + if err = vd.Decode(&v); err != nil { + return + } + + m[k] = v + return + }) +} + +func (d Decoder) decodeMapStringString(typ Type, to reflect.Value) (err error) { + m := to.Interface().(map[string]string) + + if m == nil { + m = make(map[string]string) + to.Set(reflect.ValueOf(m)) + } + + for k := range m { + delete(m, k) + } + + return d.decodeMapImpl(typ, func(kd Decoder, vd Decoder) (err error) { + var b []byte + var k string + var v string + + if _, b, err = d.decodeTypeAndString(); err != nil { + return + 
} + k = string(b) + + if err = d.Parser.ParseMapValue(vd.off - 1); err != nil { + return + } + + if _, b, err = d.decodeTypeAndString(); err != nil { + return + } + v = string(b) + + m[k] = v + return + }) +} + +func (d Decoder) decodeStruct(to reflect.Value) (Type, error) { + return d.decodeStructWith(to, structCache.lookup(to.Type())) +} + +func (d Decoder) decodeStructWith(to reflect.Value, s *structType) (t Type, err error) { + if t, err = d.Parser.ParseType(); err == nil { + err = d.decodeStructFromTypeWith(t, to, s) + } + return +} + +func (d Decoder) decodeStructFromTypeWith(typ Type, to reflect.Value, s *structType) (err error) { + if err = d.decodeMapImpl(typ, func(kd Decoder, vd Decoder) (err error) { + var b []byte + + if _, b, err = d.decodeTypeAndString(); err != nil { + return + } + f := s.fieldsByName[string(b)] + + if err = d.Parser.ParseMapValue(vd.off - 1); err != nil { + return + } + + if f == nil { + _, err = d.decodeInterface(reflect.Value{}) // discard + return + } + + _, err = f.decode(d, to.FieldByIndex(f.index)) + return + }); err != nil { + to.Set(zeroValueOf(to.Type())) + } + return +} + +func (d Decoder) decodePointer(to reflect.Value) (Type, error) { + return d.decodePointerWith(to, decodeFuncOf(to.Type().Elem())) +} + +func (d Decoder) decodePointerWith(to reflect.Value, f decodeFunc) (typ Type, err error) { + var t = to.Type() + var v reflect.Value + + if to.IsNil() { + v = reflect.New(t.Elem()) + } else { + v = to + } + + if typ, err = f(d, v.Elem()); err != nil { + return + } + + switch { + case typ == Nil: + to.Set(zeroValueOf(t)) + case to.IsNil(): + to.Set(v) + } + + return +} + +func (d Decoder) decodeDecoderPointer(to reflect.Value) (Type, error) { + return Unknown /* just needs to not be Nil */, to.Addr().Interface().(ValueDecoder).DecodeValue(d) +} + +func (d Decoder) decodeDecoder(to reflect.Value) (Type, error) { + return Unknown /* just needs to not be Nil */, to.Interface().(ValueDecoder).DecodeValue(d) +} + +func (d 
Decoder) decodeUnmarshalerPointer(to reflect.Value) (Type, error) { + return d.decodeUnmarshaler(to.Addr()) +} + +func (d Decoder) decodeUnmarshaler(to reflect.Value) (Type, error) { + if isTextParser(d.Parser) { + return d.decodeTextUnmarshaler(to) + } + return d.decodeBinaryUnmarshaler(to) +} + +func (d Decoder) decodeBinaryUnmarshalerPointer(to reflect.Value) (Type, error) { + return d.decodeBinaryUnmarshaler(to.Addr()) +} + +func (d Decoder) decodeBinaryUnmarshaler(to reflect.Value) (t Type, err error) { + var b []byte + var v = reflect.ValueOf(&b).Elem() + + if t, err = d.decodeBytes(v); err != nil { + return + } + + if to.Kind() == reflect.Ptr && to.IsNil() { + to.Set(reflect.New(to.Type().Elem())) + } + + err = to.Interface().(encoding.BinaryUnmarshaler).UnmarshalBinary(b) + return +} + +func (d Decoder) decodeTextUnmarshalerPointer(to reflect.Value) (Type, error) { + return d.decodeTextUnmarshaler(to.Addr()) +} + +func (d Decoder) decodeTextUnmarshaler(to reflect.Value) (t Type, err error) { + var s string + var v = reflect.ValueOf(&s).Elem() + + if t, err = d.decodeString(v); err != nil { + return + } + + if to.Kind() == reflect.Ptr && to.IsNil() { + to.Set(reflect.New(to.Type().Elem())) + } + + err = to.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(s)) + return +} + +func (d Decoder) decodeInterface(to reflect.Value) (t Type, err error) { + if t, err = d.Parser.ParseType(); err == nil { + err = d.decodeInterfaceFromType(t, to) + } + return +} + +func (d Decoder) decodeInterfaceFromType(t Type, to reflect.Value) (err error) { + switch t { + case Nil: + err = d.decodeInterfaceFromNil(to) + case Bool: + err = d.decodeInterfaceFrom(boolType, t, to, Decoder.decodeBoolFromType) + case Int: + err = d.decodeInterfaceFrom(int64Type, t, to, Decoder.decodeIntFromType) + case Uint: + err = d.decodeInterfaceFrom(uint64Type, t, to, Decoder.decodeUintFromType) + case Float: + err = d.decodeInterfaceFrom(float64Type, t, to, Decoder.decodeFloatFromType) + 
case String: + err = d.decodeInterfaceFrom(stringType, t, to, Decoder.decodeStringFromType) + case Bytes: + err = d.decodeInterfaceFrom(bytesType, t, to, Decoder.decodeBytesFromType) + case Time: + err = d.decodeInterfaceFrom(timeType, t, to, Decoder.decodeTimeFromType) + case Duration: + err = d.decodeInterfaceFrom(durationType, t, to, Decoder.decodeDurationFromType) + case Error: + err = d.decodeInterfaceFrom(errorInterface, t, to, Decoder.decodeErrorFromType) + case Array: + err = d.decodeInterfaceFrom(sliceInterfaceType, t, to, Decoder.decodeSliceFromType) + case Map: + if to.IsValid() && d.MapType != nil { + v := reflect.New(d.MapType).Elem() + _, err = d.decode(v) + to.Set(v) + } else { + err = d.decodeInterfaceFrom(mapInterfaceInterfaceType, t, to, Decoder.decodeMapFromType) + } + default: + panic("objconv: parser returned an unsupported value type: " + t.String()) + } + return +} + +func (d Decoder) decodeInterfaceFromNil(to reflect.Value) (err error) { + if err = d.Parser.ParseNil(); err == nil { + if to.IsValid() { + to.Set(zeroValueOf(to.Type())) + } + } + return +} + +func (d Decoder) decodeInterfaceFrom(from reflect.Type, t Type, to reflect.Value, decode func(Decoder, Type, reflect.Value) error) (err error) { + if !to.IsValid() { + return decode(d, t, reflect.Value{}) + } + + v := reflect.New(from).Elem() + + if err = decode(d, t, v); err != nil { + return + } + + to.Set(v) + return +} + +func (d Decoder) decodeUnsupported(to reflect.Value) (Type, error) { + return Nil, fmt.Errorf("objconv: the decoder doesn't support values of type %s", to.Type()) +} + +func (d Decoder) decodeTypeAndString() (t Type, b []byte, err error) { + if t, err = d.Parser.ParseType(); err == nil { + // This algorithm is the same than the one used in + // decodeStringWithType, and should be kept in sync. 
+ switch t { + case Nil: + err = d.Parser.ParseNil() + case String: + b, err = d.Parser.ParseString() + case Bytes: + b, err = d.Parser.ParseBytes() + default: + err = typeConversionError(t, String) + } + } + return +} + +// DecodeArray provides the implementation of the algorithm for decoding arrays, +// where f is called to decode each element of the array. +func (d Decoder) DecodeArray(f func(Decoder) error) (err error) { + var typ Type + + if d.off != 0 { + if d.off, err = 0, d.Parser.ParseMapValue(d.off-1); err != nil { + return + } + } + + if typ, err = d.Parser.ParseType(); err != nil { + return + } + + err = d.decodeArrayImpl(typ, f) + return +} + +func (d Decoder) decodeArrayImpl(t Type, f func(Decoder) error) (err error) { + var n int + + switch t { + case Nil: + err = d.Parser.ParseNil() + return + + case Array: + n, err = d.Parser.ParseArrayBegin() + + default: + err = typeConversionError(t, Array) + } + + if err != nil { + return + } + + i := 0 + + for n < 0 || i < n { + if n < 0 || i != 0 { + if err = d.Parser.ParseArrayNext(i); err != nil { + if err == End { + err = nil + break + } + return + } + } + if err = f(d); err != nil { + return + } + i++ + } + + err = d.Parser.ParseArrayEnd(i) + return +} + +// DecodeMap provides the implementation of the algorithm for decoding maps, +// where f is called to decode each pair of key and value. +// +// The function f is expected to decode two values from the map, the first one +// being the key and the second the associated value. The first decoder must be +// used to decode the key, the second one for the value. 
+func (d Decoder) DecodeMap(f func(Decoder, Decoder) error) (err error) { + var typ Type + + if d.off != 0 { + if d.off, err = 0, d.Parser.ParseMapValue(d.off-1); err != nil { + return + } + } + + if typ, err = d.Parser.ParseType(); err != nil { + return + } + + err = d.decodeMapImpl(typ, f) + return +} + +func (d Decoder) decodeMapImpl(t Type, f func(Decoder, Decoder) error) (err error) { + var n int + + switch t { + case Nil: + err = d.Parser.ParseNil() + return + + case Map: + n, err = d.Parser.ParseMapBegin() + + default: + err = typeConversionError(t, Map) + } + + if err != nil { + return + } + + i := 0 + + for n < 0 || i < n { + if n < 0 || i != 0 { + if err = d.Parser.ParseMapNext(i); err != nil { + if err == End { + err = nil + break + } + return + } + } + + d1 := d + d2 := d + d2.off = i + 1 + + if err = f(d1, d2); err != nil { + return + } + + i++ + } + + err = d.Parser.ParseMapEnd(i) + return +} + +// StreamDecoder decodes values in a streaming fashion, allowing an array to be +// consumed without loading it fully in memory. +// +// Instances of StreamDecoder are not safe for use by multiple goroutines. +type StreamDecoder struct { + // Parser to use to load values. + Parser Parser + + // MapType is used to override the type of maps produced by the decoder when + // there is not destination type (when decoding to an empty interface). + MapType reflect.Type + + err error + typ Type + cnt int + max int +} + +// NewStreamDecoder returns a new stream decoder that takes input from p. +// +// The function panics if p is nil. +func NewStreamDecoder(p Parser) *StreamDecoder { + if p == nil { + panic("objconv: the parser is nil") + } + return &StreamDecoder{Parser: p} +} + +// Len returns the number of values remaining to be read from the stream, which +// may be -1 if the underlying format doesn't provide this information. If an +// error occurred while decoding the stream the method returns zero because no +// more values can be read. 
+func (d *StreamDecoder) Len() int { + if d.err != nil { + return 0 + } + + if d.typ == Unknown { + if d.init() != nil { + return 0 + } + } + + return d.max - d.cnt +} + +// Err returns the last error returned by the Decode method. +// +// The method returns nil if the stream reached its natural end. +func (d *StreamDecoder) Err() error { + if d.err == End { + return nil + } + return d.err +} + +// Decodes the next value from the stream into v. +func (d *StreamDecoder) Decode(v interface{}) error { + if d.err != nil { + return d.err + } + + err := error(nil) + cnt := d.cnt + max := d.max + dec := Decoder{ + Parser: d.Parser, + MapType: d.MapType, + } + + switch d.typ { + case Unknown: + err = d.init() + max = d.max + case Array: + if cnt == max { + err = dec.Parser.ParseArrayEnd(cnt) + } else if cnt != 0 { + err = dec.Parser.ParseArrayNext(cnt) + } + } + + if err == nil { + if cnt == max { + err = End + } else { + switch err = dec.Decode(v); err { + case nil: + cnt++ + case End: + cnt++ + max = cnt + default: + if max < 0 && dec.Parser.ParseArrayEnd(cnt) == nil { + err = End + } + } + } + } + + d.err = err + d.cnt = cnt + d.max = max + return err +} + +// Encoder returns a new StreamEncoder which can be used to re-encode the stream +// decoded by d into e. +// +// The method panics if e is nil. +func (d *StreamDecoder) Encoder(e Emitter) (enc *StreamEncoder, err error) { + var typ Type + + if typ, err = d.Parser.ParseType(); err == nil { + enc = NewStreamEncoder(e) + enc.oneshot = typ != Array + } + + return +} + +func (d *StreamDecoder) init() error { + err := error(nil) + typ := Unknown + max := 0 + + if typ, err = d.Parser.ParseType(); err == nil { + switch typ { + default: + max = 1 + case Array: + max, err = d.Parser.ParseArrayBegin() + } + } + + d.err = err + d.typ = typ + d.max = max + return err +} + +// ValueDecoder is the interface that can be implemented by types that wish to +// provide their own decoding algorithms. 
+// +// The DecodeValue method is called when the value is found by a decoding +// algorithm. +type ValueDecoder interface { + DecodeValue(Decoder) error +} + +// ValueDecoderFunc allows the use of regular functions or methods as value +// decoders. +type ValueDecoderFunc func(Decoder) error + +// DecodeValue calls f(d). +func (f ValueDecoderFunc) DecodeValue(d Decoder) error { return f(d) } + +type decodeFuncOpts struct { + recurse bool + structs map[reflect.Type]*structType +} + +type decodeFunc func(Decoder, reflect.Value) (Type, error) + +func decodeFuncOf(t reflect.Type) decodeFunc { + return makeDecodeFunc(t, decodeFuncOpts{}) +} + +func makeDecodeFunc(t reflect.Type, opts decodeFuncOpts) decodeFunc { + if a, ok := AdapterOf(t); ok { + decode := a.Decode + return func(d Decoder, v reflect.Value) (Type, error) { + err := decode(d, v) + return Unknown /* just needs to not be Nil */, err + } + } + + // fast path: check if it's a basic go type + switch t { + case boolType: + return Decoder.decodeBool + + case stringType: + return Decoder.decodeString + + case bytesType: + return Decoder.decodeBytes + + case timeType: + return Decoder.decodeTime + + case durationType: + return Decoder.decodeDuration + + case emptyInterface: + return Decoder.decodeInterface + + case intType, int8Type, int16Type, int32Type, int64Type: + return Decoder.decodeInt + + case uintType, uint8Type, uint16Type, uint32Type, uint64Type, uintptrType: + return Decoder.decodeUint + + case float32Type, float64Type: + return Decoder.decodeFloat + } + + // check if it implements one of the special case interfaces, first on the + // plain type, then on the pointer type + switch { + case t.Implements(valueDecoderInterface): + return Decoder.decodeDecoder + + case t.Implements(errorInterface): + return Decoder.decodeError + + case t.Implements(binaryUnmarshalerInterface) && t.Implements(textUnmarshalerInterface): + return Decoder.decodeUnmarshaler + + case t.Implements(binaryUnmarshalerInterface): + 
return Decoder.decodeBinaryUnmarshaler + + case t.Implements(textUnmarshalerInterface): + return Decoder.decodeTextUnmarshaler + } + + switch p := reflect.PtrTo(t); { + case p.Implements(valueDecoderInterface): + return Decoder.decodeDecoderPointer + + case p.Implements(binaryUnmarshalerInterface) && p.Implements(textUnmarshalerInterface): + return Decoder.decodeUnmarshalerPointer + + case p.Implements(binaryUnmarshalerInterface): + return Decoder.decodeBinaryUnmarshalerPointer + + case p.Implements(textUnmarshalerInterface): + return Decoder.decodeTextUnmarshalerPointer + } + + // check what kind is the type, potentially generate a decoder + switch t.Kind() { + case reflect.Struct: + return makeDecodeStructFunc(t, opts) + + case reflect.Slice: + if t.Elem().Kind() == reflect.Uint8 { + return Decoder.decodeBytes + } + return makeDecodeSliceFunc(t, opts) + + case reflect.Map: + return makeDecodeMapFunc(t, opts) + + case reflect.Ptr: + return makeDecodePtrFunc(t, opts) + + case reflect.Array: + return makeDecodeArrayFunc(t, opts) + + case reflect.Bool: + return Decoder.decodeBool + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return Decoder.decodeInt + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return Decoder.decodeUint + + case reflect.Float32, reflect.Float64: + return Decoder.decodeFloat + + case reflect.String: + return Decoder.decodeString + + default: + return Decoder.decodeUnsupported + } +} + +func makeDecodeSliceFunc(t reflect.Type, opts decodeFuncOpts) decodeFunc { + if !opts.recurse { + return Decoder.decodeSlice + } + f := makeDecodeFunc(t.Elem(), opts) + return func(d Decoder, v reflect.Value) (Type, error) { + return d.decodeSliceWith(v, f) + } +} + +func makeDecodeArrayFunc(t reflect.Type, opts decodeFuncOpts) decodeFunc { + if !opts.recurse { + return Decoder.decodeArray + } + f := makeDecodeFunc(t.Elem(), opts) + return func(d Decoder, v reflect.Value) 
(Type, error) { + return d.decodeArrayWith(v, f) + } +} + +func makeDecodeMapFunc(t reflect.Type, opts decodeFuncOpts) decodeFunc { + if !opts.recurse { + return Decoder.decodeMap + } + kf := makeDecodeFunc(t.Key(), opts) + vf := makeDecodeFunc(t.Elem(), opts) + return func(d Decoder, v reflect.Value) (Type, error) { + return d.decodeMapWith(v, kf, vf) + } +} + +func makeDecodeStructFunc(t reflect.Type, opts decodeFuncOpts) decodeFunc { + if !opts.recurse { + return Decoder.decodeStruct + } + s := newStructType(t, opts.structs) + return func(d Decoder, v reflect.Value) (Type, error) { + return d.decodeStructWith(v, s) + } +} + +func makeDecodePtrFunc(t reflect.Type, opts decodeFuncOpts) decodeFunc { + if !opts.recurse { + return Decoder.decodePointer + } + f := makeDecodeFunc(t.Elem(), opts) + return func(d Decoder, v reflect.Value) (Type, error) { + return d.decodePointerWith(v, f) + } +} + +// unsafeString returns a string that is only safe to use under the following conditions: +// - b points to data on the heap +// - the bytes pointed to by b will not be modified while the returned string exists +// - the returned string will not be stored past the lifetime of b +// if the method being called with an unsafe returns an error, that error may +// contain a reference to unsafe string. if the method produces consistent results, +// calling it again with a safe string should return an error that can be safely +// returned to the caller. +func unsafeString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} diff --git a/vendor/github.com/segmentio/objconv/emit.go b/vendor/github.com/segmentio/objconv/emit.go new file mode 100644 index 0000000..233af9a --- /dev/null +++ b/vendor/github.com/segmentio/objconv/emit.go @@ -0,0 +1,112 @@ +package objconv + +import "time" + +// The Emitter interface must be implemented by types that provide encoding +// of a specific format (like json, resp, ...). 
+//
+// Emitters are not expected to be safe for use by multiple goroutines.
+type Emitter interface {
+	// EmitNil writes a nil value to the writer.
+	EmitNil() error
+
+	// EmitBool writes a boolean value to the writer.
+	EmitBool(bool) error
+
+	// EmitInt writes an integer value to the writer.
+	EmitInt(v int64, bitSize int) error
+
+	// EmitUint writes an unsigned integer value to the writer.
+	EmitUint(v uint64, bitSize int) error
+
+	// EmitFloat writes a floating point value to the writer.
+	EmitFloat(v float64, bitSize int) error
+
+	// EmitString writes a string value to the writer.
+	EmitString(string) error
+
+	// EmitBytes writes a []byte value to the writer.
+	EmitBytes([]byte) error
+
+	// EmitTime writes a time.Time value to the writer.
+	EmitTime(time.Time) error
+
+	// EmitDuration writes a time.Duration value to the writer.
+	EmitDuration(time.Duration) error
+
+	// EmitError writes an error value to the writer.
+	EmitError(error) error
+
+	// EmitArrayBegin writes the beginning of an array value to the writer.
+	// The method receives the length of the array.
+	EmitArrayBegin(int) error
+
+	// EmitArrayEnd writes the end of an array value to the writer.
+	EmitArrayEnd() error
+
+	// EmitArrayNext is called after each array value except to the last one.
+	EmitArrayNext() error
+
+	// EmitMapBegin writes the beginning of a map value to the writer.
+	// The method receives the length of the map.
+	EmitMapBegin(int) error
+
+	// EmitMapEnd writes the end of a map value to the writer.
+	EmitMapEnd() error
+
+	// EmitMapValue is called after each map key was written.
+	EmitMapValue() error
+
+	// EmitMapNext is called after each map value was written except the last one.
+	EmitMapNext() error
+}
+
+// The PrettyEmitter interface may be implemented by emitters supporting a more
+// human-friendly format.
+type PrettyEmitter interface {
+	// PrettyEmitter returns a new emitter that outputs to the same writer in a
+	// pretty format.
+ PrettyEmitter() Emitter +} + +// The textEmitter interface may be implemented by emitters of human-readable +// formats. Such emitters instruct the encoder to prefer using +// encoding.TextMarshaler over encoding.BinaryMarshaler for example. +type textEmitter interface { + // EmitsText returns true if the emitter produces a human-readable format. + TextEmitter() bool +} + +func isTextEmitter(emitter Emitter) bool { + e, _ := emitter.(textEmitter) + return e != nil && e.TextEmitter() +} + +type discardEmitter struct{} + +func (e discardEmitter) EmitNil() error { return nil } +func (e discardEmitter) EmitBool(v bool) error { return nil } +func (e discardEmitter) EmitInt(v int64, _ int) error { return nil } +func (e discardEmitter) EmitUint(v uint64, _ int) error { return nil } +func (e discardEmitter) EmitFloat(v float64, _ int) error { return nil } +func (e discardEmitter) EmitString(v string) error { return nil } +func (e discardEmitter) EmitBytes(v []byte) error { return nil } +func (e discardEmitter) EmitTime(v time.Time) error { return nil } +func (e discardEmitter) EmitDuration(v time.Duration) error { return nil } +func (e discardEmitter) EmitError(v error) error { return nil } +func (e discardEmitter) EmitArrayBegin(v int) error { return nil } +func (e discardEmitter) EmitArrayEnd() error { return nil } +func (e discardEmitter) EmitArrayNext() error { return nil } +func (e discardEmitter) EmitMapBegin(v int) error { return nil } +func (e discardEmitter) EmitMapEnd() error { return nil } +func (e discardEmitter) EmitMapNext() error { return nil } +func (e discardEmitter) EmitMapValue() error { return nil } + +var ( + // Discard is a special emitter that outputs nothing and simply discards + // the values. + // + // This emitter is mostly useful to benchmark the encoder, but it can also be + // used to disable an encoder output if necessary. 
+ Discard Emitter = discardEmitter{} +) diff --git a/vendor/github.com/segmentio/objconv/encode.go b/vendor/github.com/segmentio/objconv/encode.go new file mode 100644 index 0000000..2d178fb --- /dev/null +++ b/vendor/github.com/segmentio/objconv/encode.go @@ -0,0 +1,1017 @@ +package objconv + +import ( + "encoding" + "fmt" + "io" + "reflect" + "time" + "unsafe" +) + +// An Encoder implements the high-level encoding algorithm that inspect encoded +// values and drive the use of an Emitter to create a serialized representation +// of the data. +// +// Instances of Encoder are not safe for use by multiple goroutines. +type Encoder struct { + Emitter Emitter // the emitter used by this encoder + SortMapKeys bool // whether map keys should be sorted + key bool +} + +// NewEncoder returns a new encoder that outputs values to e. +// +// Encoders created by this function use the default encoder configuration, +// which is equivalent to using a zero-value EncoderConfig with only the Emitter +// field set. +// +// The function panics if e is nil. +func NewEncoder(e Emitter) *Encoder { + if e == nil { + panic("objconv: the emitter is nil") + } + return &Encoder{Emitter: e} +} + +// Encode encodes the generic value v. +func (e Encoder) Encode(v interface{}) (err error) { + if err = e.encodeMapValueMaybe(); err != nil { + return + } + + // This type switch optimizes encoding of common value types, it prevents + // the use of reflection to identify the type of the value, which saves a + // dynamic memory allocation. 
+ switch x := v.(type) { + case nil: + return e.Emitter.EmitNil() + + case bool: + return e.Emitter.EmitBool(x) + + case int: + return e.Emitter.EmitInt(int64(x), 0) + + case int8: + return e.Emitter.EmitInt(int64(x), 8) + + case int16: + return e.Emitter.EmitInt(int64(x), 16) + + case int32: + return e.Emitter.EmitInt(int64(x), 32) + + case int64: + return e.Emitter.EmitInt(x, 64) + + case uint8: + return e.Emitter.EmitUint(uint64(x), 8) + + case uint16: + return e.Emitter.EmitUint(uint64(x), 16) + + case uint32: + return e.Emitter.EmitUint(uint64(x), 32) + + case uint64: + return e.Emitter.EmitUint(x, 64) + + case string: + return e.Emitter.EmitString(x) + + case []byte: + return e.Emitter.EmitBytes(x) + + case time.Time: + return e.Emitter.EmitTime(x) + + case time.Duration: + return e.Emitter.EmitDuration(x) + + case []string: + return e.encodeSliceOfString(x) + + case []interface{}: + return e.encodeSliceOfInterface(x) + + case map[string]string: + return e.encodeMapStringString(x) + + case map[string]interface{}: + return e.encodeMapStringInterface(x) + + case map[interface{}]interface{}: + return e.encodeMapInterfaceInterface(x) + + // Also checks for pointer types so the program can use this as a way + // to avoid the dynamic memory allocation done by runtime.convT2E for + // converting non-pointer types to empty interfaces. 
+ case *bool: + if x == nil { + return e.Emitter.EmitNil() + } + return e.Emitter.EmitBool(*x) + + case *int: + if x == nil { + return e.Emitter.EmitNil() + } + return e.Emitter.EmitInt(int64(*x), int(8*unsafe.Sizeof(0))) + + case *int8: + if x == nil { + return e.Emitter.EmitNil() + } + return e.Emitter.EmitInt(int64(*x), 8) + + case *int16: + if x == nil { + return e.Emitter.EmitNil() + } + return e.Emitter.EmitInt(int64(*x), 16) + + case *int32: + if x == nil { + return e.Emitter.EmitNil() + } + return e.Emitter.EmitInt(int64(*x), 32) + + case *int64: + if x == nil { + return e.Emitter.EmitNil() + } + return e.Emitter.EmitInt(*x, 64) + + case *uint8: + if x == nil { + return e.Emitter.EmitNil() + } + return e.Emitter.EmitUint(uint64(*x), 8) + + case *uint16: + if x == nil { + return e.Emitter.EmitNil() + } + return e.Emitter.EmitUint(uint64(*x), 16) + + case *uint32: + if x == nil { + return e.Emitter.EmitNil() + } + return e.Emitter.EmitUint(uint64(*x), 32) + + case *uint64: + if x == nil { + return e.Emitter.EmitNil() + } + return e.Emitter.EmitUint(*x, 64) + + case *string: + if x == nil { + return e.Emitter.EmitNil() + } + return e.Emitter.EmitString(*x) + + case *[]byte: + if x == nil { + return e.Emitter.EmitNil() + } + return e.Emitter.EmitBytes(*x) + + case *time.Time: + if x == nil { + return e.Emitter.EmitNil() + } + return e.Emitter.EmitTime(*x) + + case *time.Duration: + if x == nil { + return e.Emitter.EmitNil() + } + return e.Emitter.EmitDuration(*x) + + case *[]string: + if x == nil { + return e.Emitter.EmitNil() + } + return e.encodeSliceOfString(*x) + + case *[]interface{}: + if x == nil { + return e.Emitter.EmitNil() + } + return e.encodeSliceOfInterface(*x) + + case *map[string]string: + if x == nil { + return e.Emitter.EmitNil() + } + return e.encodeMapStringString(*x) + + case *map[string]interface{}: + if x == nil { + return e.Emitter.EmitNil() + } + return e.encodeMapStringInterface(*x) + + case *map[interface{}]interface{}: + if x == nil 
{ + return e.Emitter.EmitNil() + } + return e.encodeMapInterfaceInterface(*x) + + case ValueEncoder: + return x.EncodeValue(e) + + default: + return e.encode(reflect.ValueOf(v)) + } +} + +func (e *Encoder) encodeMapValueMaybe() (err error) { + if e.key { + e.key, err = false, e.Emitter.EmitMapValue() + } + return +} + +func (e Encoder) encode(v reflect.Value) error { + return encodeFuncOf(v.Type())(e, v) +} + +func (e Encoder) encodeBool(v reflect.Value) error { + return e.Emitter.EmitBool(v.Bool()) +} + +func (e Encoder) encodeInt(v reflect.Value) error { + return e.Emitter.EmitInt(v.Int(), 0) +} + +func (e Encoder) encodeInt8(v reflect.Value) error { + return e.Emitter.EmitInt(v.Int(), 8) +} + +func (e Encoder) encodeInt16(v reflect.Value) error { + return e.Emitter.EmitInt(v.Int(), 16) +} + +func (e Encoder) encodeInt32(v reflect.Value) error { + return e.Emitter.EmitInt(v.Int(), 32) +} + +func (e Encoder) encodeInt64(v reflect.Value) error { + return e.Emitter.EmitInt(v.Int(), 64) +} + +func (e Encoder) encodeUint(v reflect.Value) error { + return e.Emitter.EmitUint(v.Uint(), 0) +} + +func (e Encoder) encodeUint8(v reflect.Value) error { + return e.Emitter.EmitUint(v.Uint(), 8) +} + +func (e Encoder) encodeUint16(v reflect.Value) error { + return e.Emitter.EmitUint(v.Uint(), 16) +} + +func (e Encoder) encodeUint32(v reflect.Value) error { + return e.Emitter.EmitUint(v.Uint(), 32) +} + +func (e Encoder) encodeUint64(v reflect.Value) error { + return e.Emitter.EmitUint(v.Uint(), 64) +} + +func (e Encoder) encodeUintptr(v reflect.Value) error { + return e.Emitter.EmitUint(v.Uint(), 0) +} + +func (e Encoder) encodeFloat32(v reflect.Value) error { + return e.Emitter.EmitFloat(v.Float(), 32) +} + +func (e Encoder) encodeFloat64(v reflect.Value) error { + return e.Emitter.EmitFloat(v.Float(), 64) +} + +func (e Encoder) encodeString(v reflect.Value) error { + return e.Emitter.EmitString(v.String()) +} + +func (e Encoder) encodeBytes(v reflect.Value) error { + return 
e.Emitter.EmitBytes(v.Bytes()) +} + +func (e Encoder) encodeTime(v reflect.Value) error { + var t time.Time + + // Here we may receive either a pointer or a plain value because there is a + // special case for *time.Time in the encoder to avoid having it match the + // encoding.TextMarshaler interface and instead treat it the same way than + // if we had gotten the plain value right away. + // + // As a side effect, this also sometimes permit more optimizations because + // having a pointer will likely avoid a memory allocation when calling + // Interface on the value. + if v.Kind() != reflect.Ptr { + t = v.Interface().(time.Time) + } else { + if ptr := v.Interface().(*time.Time); ptr == nil { + return e.Emitter.EmitNil() + } else { + t = *ptr + } + } + + return e.Emitter.EmitTime(t) +} + +func (e Encoder) encodeDuration(v reflect.Value) error { + return e.Emitter.EmitDuration(time.Duration(v.Int())) +} + +func (e Encoder) encodeError(v reflect.Value) error { + return e.Emitter.EmitError(v.Interface().(error)) +} + +func (e Encoder) encodeArray(v reflect.Value) error { + return e.encodeArrayWith(v, encodeFuncOf(v.Type().Elem())) +} + +func (e Encoder) encodeArrayWith(v reflect.Value, f encodeFunc) error { + i := 0 + return e.EncodeArray(v.Len(), func(e Encoder) (err error) { + err = f(e, v.Index(i)) + i++ + return + }) +} + +func (e Encoder) encodeSliceOfString(a []string) error { + i := 0 + return e.EncodeArray(len(a), func(e Encoder) (err error) { + err = e.Emitter.EmitString(a[i]) + i++ + return + }) +} + +func (e Encoder) encodeSliceOfInterface(a []interface{}) error { + i := 0 + return e.EncodeArray(len(a), func(e Encoder) (err error) { + err = e.Encode(a[i]) + i++ + return + }) +} + +func (e Encoder) encodeMap(v reflect.Value) error { + t := v.Type() + kf := encodeFuncOf(t.Key()) + vf := encodeFuncOf(t.Elem()) + return e.encodeMapWith(v, kf, vf) +} + +func (e Encoder) encodeMapWith(v reflect.Value, kf encodeFunc, vf encodeFunc) error { + t := v.Type() + + if 
!e.SortMapKeys { + switch { + case t.ConvertibleTo(mapInterfaceInterfaceType): + return e.encodeMapInterfaceInterfaceValue(v.Convert(mapInterfaceInterfaceType)) + + case t.ConvertibleTo(mapStringInterfaceType): + return e.encodeMapStringInterfaceValue(v.Convert(mapStringInterfaceType)) + + case t.ConvertibleTo(mapStringStringType): + return e.encodeMapStringStringValue(v.Convert(mapStringStringType)) + } + } + + var k []reflect.Value + var n = v.Len() + var i = 0 + + if n != 0 { + k = v.MapKeys() + + if e.SortMapKeys { + sortValues(t.Key(), k) + } + } + + return e.EncodeMap(n, func(ke Encoder, ve Encoder) (err error) { + if err = kf(e, k[i]); err != nil { + return + } + if err = e.Emitter.EmitMapValue(); err != nil { + return + } + if err = vf(e, v.MapIndex(k[i])); err != nil { + return + } + i++ + return + }) +} + +func (e Encoder) encodeMapInterfaceInterfaceValue(v reflect.Value) error { + return e.encodeMapInterfaceInterface(v.Interface().(map[interface{}]interface{})) +} + +func (e Encoder) encodeMapInterfaceInterface(m map[interface{}]interface{}) (err error) { + n := len(m) + i := 0 + + if err = e.Emitter.EmitMapBegin(n); err != nil { + return + } + + for k, v := range m { + if i != 0 { + if err = e.Emitter.EmitMapNext(); err != nil { + return + } + } + if err = e.Encode(k); err != nil { + return + } + if err = e.Emitter.EmitMapValue(); err != nil { + return + } + if err = e.Encode(v); err != nil { + return + } + i++ + } + + return e.Emitter.EmitMapEnd() +} + +func (e Encoder) encodeMapStringInterfaceValue(v reflect.Value) error { + return e.encodeMapStringInterface(v.Interface().(map[string]interface{})) +} + +func (e Encoder) encodeMapStringInterface(m map[string]interface{}) (err error) { + n := len(m) + i := 0 + + if err = e.Emitter.EmitMapBegin(n); err != nil { + return + } + + for k, v := range m { + if i != 0 { + if err = e.Emitter.EmitMapNext(); err != nil { + return + } + } + if err = e.Emitter.EmitString(k); err != nil { + return + } + if err = 
e.Emitter.EmitMapValue(); err != nil { + return + } + if err = e.Encode(v); err != nil { + return + } + i++ + } + + return e.Emitter.EmitMapEnd() +} + +func (e Encoder) encodeMapStringStringValue(v reflect.Value) error { + return e.encodeMapStringString(v.Interface().(map[string]string)) +} + +func (e Encoder) encodeMapStringString(m map[string]string) (err error) { + n := len(m) + i := 0 + + if err = e.Emitter.EmitMapBegin(n); err != nil { + return + } + + for k, v := range m { + if i != 0 { + if err = e.Emitter.EmitMapNext(); err != nil { + return + } + } + if err = e.Emitter.EmitString(k); err != nil { + return + } + if err = e.Emitter.EmitMapValue(); err != nil { + return + } + if err = e.Emitter.EmitString(v); err != nil { + return + } + i++ + } + + return e.Emitter.EmitMapEnd() +} + +func (e Encoder) encodeStruct(v reflect.Value) error { + return e.encodeStructWith(v, structCache.lookup(v.Type())) +} + +func (e Encoder) encodeStructWith(v reflect.Value, s *structType) (err error) { + n := 0 + + for i := range s.fields { + f := &s.fields[i] + if !f.omit(v.FieldByIndex(f.index)) { + n++ + } + } + + if err = e.Emitter.EmitMapBegin(n); err != nil { + return + } + n = 0 + + for i := range s.fields { + f := &s.fields[i] + if fv := v.FieldByIndex(f.index); !f.omit(fv) { + if n != 0 { + if err = e.Emitter.EmitMapNext(); err != nil { + return + } + } + if err = e.Emitter.EmitString(f.name); err != nil { + return + } + if err = e.Emitter.EmitMapValue(); err != nil { + return + } + if err = f.encode(e, fv); err != nil { + return + } + n++ + } + } + + return e.Emitter.EmitMapEnd() +} + +func (e Encoder) encodePointer(v reflect.Value) error { + return e.encodePointerWith(v, encodeFuncOf(v.Type().Elem())) +} + +func (e Encoder) encodePointerWith(v reflect.Value, f encodeFunc) error { + if v.IsNil() { + return e.Emitter.EmitNil() + } + return f(e, v.Elem()) +} + +func (e Encoder) encodeInterface(v reflect.Value) error { + if v.IsNil() { + return e.Emitter.EmitNil() + } + 
// encodeBinaryMarshaler emits the encoding.BinaryMarshaler output of v as a
// byte blob. The type assertion is safe because makeEncodeFunc only selects
// this function for types implementing the interface.
func (e Encoder) encodeBinaryMarshaler(v reflect.Value) error {
	b, err := v.Interface().(encoding.BinaryMarshaler).MarshalBinary()
	if err == nil {
		err = e.Emitter.EmitBytes(b)
	}
	return err
}
+func (e Encoder) EncodeArray(n int, f func(Encoder) error) (err error) { + if e.key { + if e.key, err = false, e.Emitter.EmitMapValue(); err != nil { + return + } + } + + if err = e.Emitter.EmitArrayBegin(n); err != nil { + return + } + +encodeArray: + for i := 0; n < 0 || i < n; i++ { + if i != 0 { + if e.Emitter.EmitArrayNext(); err != nil { + return + } + } + switch err = f(e); err { + case nil: + case End: + break encodeArray + default: + return + } + } + + return e.Emitter.EmitArrayEnd() +} + +// EncodeMap provides the implementation of the map encoding algorithm, where n +// is the number of elements in the map, and f a function called to encode each +// element. +// +// The n argument can be set to a negative value to indicate that the program +// doesn't know how many elements it will output to the map. Be mindful that not +// all emitters support encoding maps of unknown length. +// +// The f function is called to encode each element of the map, it is expected to +// encode two values, the first one being the key, follow by the associated value. +// The first encoder must be used to encode the key, the second for the value. +func (e Encoder) EncodeMap(n int, f func(Encoder, Encoder) error) (err error) { + if e.key { + if e.key, err = false, e.Emitter.EmitMapValue(); err != nil { + return + } + } + + if err = e.Emitter.EmitMapBegin(n); err != nil { + return + } + +encodeMap: + for i := 0; n < 0 || i < n; i++ { + if i != 0 { + if err = e.Emitter.EmitMapNext(); err != nil { + return + } + } + e.key = true + err = f( + Encoder{Emitter: e.Emitter, SortMapKeys: e.SortMapKeys}, + Encoder{Emitter: e.Emitter, SortMapKeys: e.SortMapKeys, key: true}, + ) + // Because internal calls don't use the exported methods they may not + // reset this flag to false when expected, forcing the value here. 
// Open explicitly tells the encoder to start the stream, setting the number
// of values to n.
//
// The call is idempotent: only the first invocation records n and emits the
// array header. A negative n means the element count is unknown (support for
// that depends on the emitter).
func (e *StreamEncoder) Open(n int) error {
	// Errors are sticky: once the encoder failed, every call reports it.
	if err := e.err; err != nil {
		return err
	}

	if e.closed {
		return io.ErrClosedPipe
	}

	if !e.opened {
		e.max = n
		e.opened = true

		// In oneshot mode a single value is written with no enclosing array.
		if !e.oneshot {
			e.err = e.Emitter.EmitArrayBegin(n)
		}
	}

	return e.err
}
// Encode writes v to the stream, encoding it based on the emitter configured
// on e.
//
// The stream is opened lazily with an unknown length on the first call, and
// closed automatically once the announced element count (set via Open) has
// been reached.
func (e *StreamEncoder) Encode(v interface{}) error {
	if err := e.Open(-1); err != nil {
		return err
	}

	// Reject values past the limit announced via Open.
	if e.max >= 0 && e.cnt >= e.max {
		return fmt.Errorf("objconv: too many values sent to a stream encoder exceed the configured limit of %d", e.max)
	}

	// Separator between elements; skipped in oneshot mode where no enclosing
	// array was started.
	if !e.oneshot && e.cnt != 0 {
		e.err = e.Emitter.EmitArrayNext()
	}

	if e.err == nil {
		e.err = (Encoder{
			Emitter:     e.Emitter,
			SortMapKeys: e.SortMapKeys,
		}).Encode(v)

		// Auto-close once the announced number of elements was written.
		if e.cnt++; e.max >= 0 && e.cnt >= e.max {
			e.Close()
		}
	}

	return e.err
}
func encodeFuncOf(t reflect.Type) encodeFunc {
	// Non-recursive lookup: composite kinds resolve to the generic
	// Encoder.encode* methods, which look up their element encoders at
	// encoding time (see the !opts.recurse branches in makeEncode*Func).
	return makeEncodeFunc(t, encodeFuncOpts{})
}
// makeEncodeMapFunc returns the encode function used for map values of type t.
// In non-recursive mode it defers to the generic Encoder.encodeMap; otherwise
// the key and value encoders are resolved once here and captured by the
// returned closure.
func makeEncodeMapFunc(t reflect.Type, opts encodeFuncOpts) encodeFunc {
	if !opts.recurse {
		return Encoder.encodeMap
	}
	kf := makeEncodeFunc(t.Key(), opts)
	vf := makeEncodeFunc(t.Elem(), opts)
	return func(e Encoder, v reflect.Value) error {
		return e.encodeMapWith(v, kf, vf)
	}
}
// typeConversionError returns the error reported when a value of type `from`
// cannot be converted to type `to`.
func typeConversionError(from Type, to Type) error {
	return fmt.Errorf("objconv: cannot convert from %s to %s", from, to)
}
func Unmarshal(b []byte, v interface{}) error {
	// Reuse a pooled parser + buffer pair to avoid allocating on every call.
	u := unmarshalerPool.Get().(*unmarshaler)
	u.reset(b)

	err := (objconv.Decoder{Parser: u}).Decode(v)

	// Drop the reference to the caller's bytes before returning the
	// unmarshaler to the pool, so the pool doesn't pin b in memory.
	u.reset(nil)
	unmarshalerPool.Put(u)
	return err
}
+type Emitter struct { + w io.Writer + s []byte + a [128]byte +} + +func NewEmitter(w io.Writer) *Emitter { + e := &Emitter{w: w} + e.s = e.a[:0] + return e +} + +func (e *Emitter) Reset(w io.Writer) { + e.w = w +} + +func (e *Emitter) EmitNil() (err error) { + _, err = e.w.Write(nullBytes[:]) + return +} + +func (e *Emitter) EmitBool(v bool) (err error) { + if v { + _, err = e.w.Write(trueBytes[:]) + } else { + _, err = e.w.Write(falseBytes[:]) + } + return +} + +func (e *Emitter) EmitInt(v int64, _ int) (err error) { + _, err = e.w.Write(strconv.AppendInt(e.s[:0], v, 10)) + return +} + +func (e *Emitter) EmitUint(v uint64, _ int) (err error) { + _, err = e.w.Write(strconv.AppendUint(e.s[:0], v, 10)) + return +} + +func (e *Emitter) EmitFloat(v float64, bitSize int) (err error) { + switch { + case math.IsNaN(v): + err = errors.New("NaN has no json representation") + + case math.IsInf(v, +1): + err = errors.New("+Inf has no json representation") + + case math.IsInf(v, -1): + err = errors.New("-Inf has no json representation") + + default: + _, err = e.w.Write(strconv.AppendFloat(e.s[:0], v, 'g', -1, bitSize)) + } + return +} + +func (e *Emitter) EmitString(v string) (err error) { + i := 0 + j := 0 + n := len(v) + s := append(e.s[:0], '"') + + for j != n { + b := v[j] + j++ + + switch b { + case '"', '\\': + // b = b + + case '\b': + b = 'b' + + case '\f': + b = 'f' + + case '\n': + b = 'n' + + case '\r': + b = 'r' + + case '\t': + b = 't' + + default: + continue + } + + s = append(s, v[i:j-1]...) + s = append(s, '\\', b) + i = j + } + + s = append(s, v[i:j]...) 
// align rounds n up to the next multiple of a, returning n unchanged when it
// already is one.
func align(n int, a int) int {
	if r := n % a; r != 0 {
		return ((n / a) + 1) * a
	}
	return n
}
// indent writes a newline followed by two spaces per nesting level (e.i),
// emitting the padding in chunks no larger than the static spaces buffer.
func (e *PrettyEmitter) indent() (err error) {
	if _, err = e.w.Write(newline[:]); err != nil {
		return
	}

	for n := 2 * e.i; n != 0; {
		// Cap each write at the length of the shared spaces slice.
		n1 := n
		n2 := len(spaces)

		if n1 > n2 {
			n1 = n2
		}

		if _, err = e.w.Write(spaces[:n1]); err != nil {
			return
		}

		n -= n1
	}

	return
}
func Marshal(v interface{}) (b []byte, err error) {
	// Grab a pooled emitter + buffer; Truncate(0) keeps the buffer capacity
	// so repeated calls amortize allocations.
	m := marshalerPool.Get().(*marshaler)
	m.b.Truncate(0)

	if err = (objconv.Encoder{Emitter: m}).Encode(v); err == nil {
		// Copy out of the pooled buffer so the returned slice stays valid
		// after the marshaler is reused by another call.
		b = make([]byte, m.b.Len())
		copy(b, m.b.Bytes())
	}

	marshalerPool.Put(m)
	return
}
// Parser reads JSON text from an io.Reader and exposes it through the
// parsing primitives consumed by objconv decoders (see NewDecoder and
// NewStreamDecoder).
//
// The unread portion of the read buffer is b[i:j]; s is a scratch buffer
// used to assemble strings and numbers, initially aliasing the inline
// array c so small tokens need no heap allocation.
type Parser struct {
	r io.Reader // reader to load bytes from
	s []byte    // buffer used for building strings
	i int       // offset of the first byte in b
	j int       // offset of the last byte in b
	b [128]byte // buffer where bytes are loaded from the reader
	c [128]byte // initial backend array for s
}
objconv.Bool + + case b == '-' || (b >= '0' && b <= '9'): + t = objconv.Int + + chunk, _ := p.peekNumber() + + for _, c := range chunk { + if c == '.' || c == 'e' || c == 'E' { + t = objconv.Float + break + } + } + + // Cache the result of peekNumber for the following call to ParseInt or + // ParseFloat. + p.s = append(p.s[:0], chunk...) + + default: + err = fmt.Errorf("objconv/json: expected token but found '%c'", b) + } + + return +} + +func (p *Parser) ParseNil() (err error) { + return p.readToken(nullBytes[:]) +} + +func (p *Parser) ParseBool() (v bool, err error) { + var b byte + + if b, err = p.peekByteAt(0); err != nil { + return + } + + switch b { + case 'f': + v, err = false, p.readToken(falseBytes[:]) + + case 't': + v, err = true, p.readToken(trueBytes[:]) + + default: + err = fmt.Errorf("objconv/json: expected boolean but found '%c'", b) + } + + return +} + +func (p *Parser) ParseInt() (v int64, err error) { + if v, err = objutil.ParseInt(p.s); err != nil { + return + } + p.i += len(p.s) + return +} + +func (p *Parser) ParseUint() (v uint64, err error) { + panic("objconv/json: ParseUint should never be called because JSON has no unsigned integer type, this is likely a bug in the decoder code") +} + +func (p *Parser) ParseFloat() (v float64, err error) { + if v, err = strconv.ParseFloat(stringNoCopy(p.s), 64); err != nil { + return + } + p.i += len(p.s) + return +} + +func (p *Parser) ParseString() (v []byte, err error) { + if p.i == p.j { + if err = p.fill(); err != nil { + return + } + } + + // fast path: look for an unescaped string in the read buffer. + if p.i != p.j && p.b[p.i] == '"' { + chunk := p.b[p.i+1 : p.j] + off1 := bytes.IndexByte(chunk, '"') + off2 := bytes.IndexByte(chunk, '\\') + + if off1 >= 0 && off2 < 0 { + v = p.b[p.i+1 : p.i+1+off1] + p.i += off1 + 2 + return + } + } + + // there are escape characters or the string didn't fit in the read buffer. 
+ if err = p.readByte('"'); err != nil { + return + } + + escaped := false + v = p.s[:0] + + for { + var b byte + + if b, err = p.peekByteAt(0); err != nil { + return + } + p.i++ + + if escaped { + escaped = false + switch b { + case '"', '\\', '/': + // simple escaped character + case 'n': + b = '\n' + + case 'r': + b = '\r' + + case 't': + b = '\t' + + case 'b': + b = '\b' + + case 'f': + b = '\f' + + case 'u': + var r1 rune + var r2 rune + if r1, err = p.readUnicode(); err != nil { + return + } + if utf16.IsSurrogate(r1) { + if r2, err = p.readUnicode(); err != nil { + return + } + r1 = utf16.DecodeRune(r1, r2) + } + v = append(v, 0, 0, 0, 0) // make room for 4 bytes + i := len(v) - 4 + n := utf8.EncodeRune(v[i:], r1) + v = v[:i+n] + continue + + default: // not sure what this escape sequence is + v = append(v, '\\') + } + } else if b == '\\' { + escaped = true + continue + } else if b == '"' { + break + } + + v = append(v, b) + } + + p.s = v[:0] + return +} + +func (p *Parser) ParseBytes() (v []byte, err error) { + panic("objconv/json: ParseBytes should never be called because JOSN has no bytes, this is likely a bug in the decoder code") +} + +func (p *Parser) ParseTime() (v time.Time, err error) { + panic("objconv/json: ParseBytes should never be called because JSON has no time type, this is likely a bug in the decoder code") +} + +func (p *Parser) ParseDuration() (v time.Duration, err error) { + panic("objconv/json: ParseDuration should never be called because JSON has no duration type, this is likely a bug in the decoder code") +} + +func (p *Parser) ParseError() (v error, err error) { + panic("objconv/json: ParseError should never be called because JSON has no error type, this is likely a bug in the decoder code") +} + +func (p *Parser) ParseArrayBegin() (n int, err error) { + return -1, p.readByte('[') +} + +func (p *Parser) ParseArrayEnd(n int) (err error) { + if err = p.skipSpaces(); err != nil { + return + } + return p.readByte(']') +} + +func (p 
// isNumberByte reports whether b may appear inside a JSON number literal:
// a decimal digit, a sign, the decimal point, or an exponent marker.
func isNumberByte(b byte) bool {
	switch b {
	case '.', '+', '-', 'e', 'E':
		return true
	}
	return b >= '0' && b <= '9'
}
// readUnicode reads the four hexadecimal digits following a \u escape and
// returns the corresponding code point. Values must fit in 16 bits; the
// caller is responsible for combining UTF-16 surrogate pairs (see
// ParseString).
func (p *Parser) readUnicode() (r rune, err error) {
	var chunk []byte
	var code uint64

	if chunk, err = p.peek(4); err != nil {
		return
	}

	if code, err = objutil.ParseUintHex(chunk); err != nil {
		err = fmt.Errorf("objconv/json: expected an hexadecimal unicode code point but found %#v", string(chunk))
		return
	}

	if code > objutil.Uint16Max {
		err = fmt.Errorf("objconv/json: expected an hexadecimal unicode code points but found an overflowing value %X", code)
		return
	}

	// Only consume the digits once they are known to be valid.
	p.i += 4
	r = rune(code)
	return
}
+ for _, b := range p.b[p.i:p.j] { + switch b { + case ' ', '\n', '\t', '\r', '\b', '\f': + p.i++ + default: + return + } + } + + // all trailing bytes in the read buffer were spaces, clear and refill. + p.i = 0 + p.j = 0 + } +} + +func (p *Parser) fill() (err error) { + n := p.j - p.i + copy(p.b[:n], p.b[p.i:p.j]) + p.i = 0 + p.j = n + + if n, err = p.r.Read(p.b[p.j:]); n > 0 { + err = nil + p.j += n + } else if err != nil { + return + } else { + err = io.ErrNoProgress + return + } + + return +} + +func stringNoCopy(b []byte) string { + n := len(b) + if n == 0 { + return "" + } + return *(*string)(unsafe.Pointer(&reflect.StringHeader{ + Data: uintptr(unsafe.Pointer(&b[0])), + Len: n, + })) +} diff --git a/vendor/github.com/segmentio/objconv/objutil/duration.go b/vendor/github.com/segmentio/objconv/objutil/duration.go new file mode 100644 index 0000000..a1a8af3 --- /dev/null +++ b/vendor/github.com/segmentio/objconv/objutil/duration.go @@ -0,0 +1,125 @@ +package objutil + +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +import "time" + +// AppendDuration appends a human-readable representation of d to b. +// +// The function copies the implementation of time.Duration.String but prevents +// Go from making a dynamic memory allocation on the returned value. 
+func AppendDuration(b []byte, d time.Duration) []byte { + // Largest time is 2540400h10m10.000000000s + var buf [32]byte + w := len(buf) + + u := uint64(d) + neg := d < 0 + if neg { + u = -u + } + + if u < uint64(time.Second) { + // Special case: if duration is smaller than a second, + // use smaller units, like 1.2ms + var prec int + w-- + buf[w] = 's' + w-- + switch { + case u == 0: + return append(b, '0', 's') + case u < uint64(time.Microsecond): + // print nanoseconds + prec = 0 + buf[w] = 'n' + case u < uint64(time.Millisecond): + // print microseconds + prec = 3 + // U+00B5 'µ' micro sign == 0xC2 0xB5 + w-- // Need room for two bytes. + copy(buf[w:], "µ") + default: + // print milliseconds + prec = 6 + buf[w] = 'm' + } + w, u = fmtFrac(buf[:w], u, prec) + w = fmtInt(buf[:w], u) + } else { + w-- + buf[w] = 's' + + w, u = fmtFrac(buf[:w], u, 9) + + // u is now integer seconds + w = fmtInt(buf[:w], u%60) + u /= 60 + + // u is now integer minutes + if u > 0 { + w-- + buf[w] = 'm' + w = fmtInt(buf[:w], u%60) + u /= 60 + + // u is now integer hours + // Stop at hours because days can be different lengths. + if u > 0 { + w-- + buf[w] = 'h' + w = fmtInt(buf[:w], u) + } + } + } + + if neg { + w-- + buf[w] = '-' + } + + return append(b, buf[w:]...) +} + +// fmtFrac formats the fraction of v/10**prec (e.g., ".12345") into the +// tail of buf, omitting trailing zeros. it omits the decimal +// point too when the fraction is 0. It returns the index where the +// output bytes begin and the value v/10**prec. +func fmtFrac(buf []byte, v uint64, prec int) (nw int, nv uint64) { + // Omit trailing zeros up to and including decimal point. + w := len(buf) + print := false + for i := 0; i < prec; i++ { + digit := v % 10 + print = print || digit != 0 + if print { + w-- + buf[w] = byte(digit) + '0' + } + v /= 10 + } + if print { + w-- + buf[w] = '.' + } + return w, v +} + +// fmtInt formats v into the tail of buf. +// It returns the index where the output begins. 
+func fmtInt(buf []byte, v uint64) int { + w := len(buf) + if v == 0 { + w-- + buf[w] = '0' + } else { + for v > 0 { + w-- + buf[w] = byte(v%10) + '0' + v /= 10 + } + } + return w +} diff --git a/vendor/github.com/segmentio/objconv/objutil/empty.go b/vendor/github.com/segmentio/objconv/objutil/empty.go new file mode 100644 index 0000000..0fc19d6 --- /dev/null +++ b/vendor/github.com/segmentio/objconv/objutil/empty.go @@ -0,0 +1,45 @@ +package objutil + +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +import ( + "reflect" + "unsafe" +) + +// IsEmpty returns true if the value given as argument would be considered +// empty by the standard library packages, and therefore not serialized if +// `omitempty` is set on a struct field with this value. +func IsEmpty(v interface{}) bool { + return IsEmptyValue(reflect.ValueOf(v)) +} + +// IsEmptyValue returns true if the value given as argument would be considered +// empty by the standard library packages, and therefore not serialized if +// `omitempty` is set on a struct field with this value. 
+// +// Based on https://golang.org/src/encoding/json/encode.go?h=isEmpty +func IsEmptyValue(v reflect.Value) bool { + if !v.IsValid() { + return true // nil interface{} + } + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr, reflect.Chan, reflect.Func: + return v.IsNil() + case reflect.UnsafePointer: + return unsafe.Pointer(v.Pointer()) == nil + } + return false +} diff --git a/vendor/github.com/segmentio/objconv/objutil/int.go b/vendor/github.com/segmentio/objconv/objutil/int.go new file mode 100644 index 0000000..38be544 --- /dev/null +++ b/vendor/github.com/segmentio/objconv/objutil/int.go @@ -0,0 +1,133 @@ +package objutil + +import "fmt" + +// ParseInt parses a decimanl representation of an int64 from b. +// +// The function is equivalent to calling strconv.ParseInt(string(b), 10, 64) but +// it prevents Go from making a memory allocation for converting a byte slice to +// a string (escape analysis fails due to the error returned by strconv.ParseInt). +// +// Because it only works with base 10 the function is also significantly faster +// than strconv.ParseInt. 
+func ParseInt(b []byte) (int64, error) { + var val int64 + + if len(b) == 0 { + return 0, errorInvalidUint64(b) + } + + if b[0] == '-' { + const max = Int64Min + const lim = max / 10 + + if b = b[1:]; len(b) == 0 { + return 0, errorInvalidUint64(b) + } + + for _, d := range b { + if !(d >= '0' && d <= '9') { + return 0, errorInvalidInt64(b) + } + + if val < lim { + return 0, errorOverflowInt64(b) + } + + val *= 10 + x := int64(d - '0') + + if val < (max + x) { + return 0, errorOverflowInt64(b) + } + + val -= x + } + } else { + const max = Int64Max + const lim = max / 10 + + for _, d := range b { + if !(d >= '0' && d <= '9') { + return 0, errorInvalidInt64(b) + } + x := int64(d - '0') + + if val > lim { + return 0, errorOverflowInt64(b) + } + + if val *= 10; val > (max - x) { + return 0, errorOverflowInt64(b) + } + + val += x + } + } + + return val, nil +} + +// ParseUintHex parses a hexadecimanl representation of a uint64 from b. +// +// The function is equivalent to calling strconv.ParseUint(string(b), 16, 64) but +// it prevents Go from making a memory allocation for converting a byte slice to +// a string (escape analysis fails due to the error returned by strconv.ParseUint). +// +// Because it only works with base 16 the function is also significantly faster +// than strconv.ParseUint. 
+func ParseUintHex(b []byte) (uint64, error) { + const max = Uint64Max + const lim = max / 0x10 + var val uint64 + + if len(b) == 0 { + return 0, errorInvalidUint64(b) + } + + for _, d := range b { + var x uint64 + + switch { + case d >= '0' && d <= '9': + x = uint64(d - '0') + + case d >= 'A' && d <= 'F': + x = uint64(d-'A') + 0xA + + case d >= 'a' && d <= 'f': + x = uint64(d-'a') + 0xA + + default: + return 0, errorInvalidUint64(b) + } + + if val > lim { + return 0, errorOverflowUint64(b) + } + + if val *= 0x10; val > (max - x) { + return 0, errorOverflowUint64(b) + } + + val += x + } + + return val, nil +} + +func errorInvalidInt64(b []byte) error { + return fmt.Errorf("objconv: %#v is not a valid decimal representation of a signed 64 bits integer", string(b)) +} + +func errorOverflowInt64(b []byte) error { + return fmt.Errorf("objconv: %#v overflows the maximum values of a signed 64 bits integer", string(b)) +} + +func errorInvalidUint64(b []byte) error { + return fmt.Errorf("objconv: %#v is not a valid decimal representation of an unsigned 64 bits integer", string(b)) +} + +func errorOverflowUint64(b []byte) error { + return fmt.Errorf("objconv: %#v overflows the maximum values of an unsigned 64 bits integer", string(b)) +} diff --git a/vendor/github.com/segmentio/objconv/objutil/limits.go b/vendor/github.com/segmentio/objconv/objutil/limits.go new file mode 100644 index 0000000..b10361a --- /dev/null +++ b/vendor/github.com/segmentio/objconv/objutil/limits.go @@ -0,0 +1,107 @@ +package objutil + +import ( + "fmt" + "reflect" +) + +const ( + // UintMax is the maximum value of a uint. + UintMax = ^uint(0) + + // UintMin is the minimum value of a uint. + UintMin = 0 + + // Uint8Max is the maximum value of a uint8. + Uint8Max = 255 + + // Uint8Min is the minimum value of a uint8. + Uint8Min = 0 + + // Uint16Max is the maximum value of a uint16. + Uint16Max = 65535 + + // Uint16Min is the minimum value of a uint16. 
+ Uint16Min = 0 + + // Uint32Max is the maximum value of a uint32. + Uint32Max = 4294967295 + + // Uint32Min is the minimum value of a uint32. + Uint32Min = 0 + + // Uint64Max is the maximum value of a uint64. + Uint64Max = 18446744073709551615 + + // Uint64Min is the minimum value of a uint64. + Uint64Min = 0 + + // UintptrMax is the maximum value of a uintptr. + UintptrMax = ^uintptr(0) + + // UintptrMin is the minimum value of a uintptr. + UintptrMin = 0 + + // IntMax is the maximum value of a int. + IntMax = int(UintMax >> 1) + + // IntMin is the minimum value of a int. + IntMin = -IntMax - 1 + + // Int8Max is the maximum value of a int8. + Int8Max = 127 + + // Int8Min is the minimum value of a int8. + Int8Min = -128 + + // Int16Max is the maximum value of a int16. + Int16Max = 32767 + + // Int16Min is the minimum value of a int16. + Int16Min = -32768 + + // Int32Max is the maximum value of a int32. + Int32Max = 2147483647 + + // Int32Min is the minimum value of a int32. + Int32Min = -2147483648 + + // Int64Max is the maximum value of a int64. + Int64Max = 9223372036854775807 + + // Int64Min is the minimum value of a int64. + Int64Min = -9223372036854775808 + + // Float32IntMax is the maximum consecutive integer value representable by a float32. + Float32IntMax = 16777216 + + // Float32IntMin is the minimum consecutive integer value representable by a float32. + Float32IntMin = -16777216 + + // Float64IntMax is the maximum consecutive integer value representable by a float64. + Float64IntMax = 9007199254740992 + + // Float64IntMin is the minimum consecutive integer value representable by a float64. + Float64IntMin = -9007199254740992 +) + +// CheckUint64Bounds verifies that v is smaller than max, t represents the +// original type of v. 
+func CheckUint64Bounds(v uint64, max uint64, t reflect.Type) (err error) { + if v > max { + err = fmt.Errorf("objconv: %d overflows the maximum value of %d for %s", v, max, t) + } + return +} + +// CheckInt64Bounds verifies that v is within min and max, t represents the +// original type of v. +func CheckInt64Bounds(v int64, min int64, max uint64, t reflect.Type) (err error) { + if v < min { + err = fmt.Errorf("objconv: %d overflows the minimum value of %d for %s", v, min, t) + } + if v > 0 && uint64(v) > max { + err = fmt.Errorf("objconv: %d overflows the maximum value of %d for %s", v, max, t) + } + return +} diff --git a/vendor/github.com/segmentio/objconv/objutil/tag.go b/vendor/github.com/segmentio/objconv/objutil/tag.go new file mode 100644 index 0000000..0dd8e4a --- /dev/null +++ b/vendor/github.com/segmentio/objconv/objutil/tag.go @@ -0,0 +1,72 @@ +package objutil + +import "strings" + +// Tag represents the result of parsing the tag of a struct field. +type Tag struct { + // Name is the field name that should be used when serializing. + Name string + + // Omitempty is true if the tag had `omitempty` set. + Omitempty bool + + // Omitzero is true if the tag had `omitzero` set. + Omitzero bool +} + +// ParseTag parses a raw tag obtained from a struct field, returning the results +// as a tag value. +func ParseTag(s string) Tag { + var name string + var omitzero bool + var omitempty bool + + name, s = parseNextTagToken(s) + + for len(s) != 0 { + var token string + switch token, s = parseNextTagToken(s); token { + case "omitempty": + omitempty = true + case "omitzero": + omitzero = true + } + } + + return Tag{ + Name: name, + Omitempty: omitempty, + Omitzero: omitzero, + } +} + +// ParseTagJSON is similar to ParseTag but only supports features supported by +// the standard encoding/json package. 
+func ParseTagJSON(s string) Tag { + var name string + var omitempty bool + + name, s = parseNextTagToken(s) + + for len(s) != 0 { + var token string + switch token, s = parseNextTagToken(s); token { + case "omitempty": + omitempty = true + } + } + + return Tag{ + Name: name, + Omitempty: omitempty, + } +} + +func parseNextTagToken(s string) (token string, next string) { + if split := strings.IndexByte(s, ','); split < 0 { + token = s + } else { + token, next = s[:split], s[split+1:] + } + return +} diff --git a/vendor/github.com/segmentio/objconv/objutil/zero.go b/vendor/github.com/segmentio/objconv/objutil/zero.go new file mode 100644 index 0000000..ff3dc35 --- /dev/null +++ b/vendor/github.com/segmentio/objconv/objutil/zero.go @@ -0,0 +1,57 @@ +package objutil + +import ( + "reflect" + "unsafe" +) + +// IsZero returns true if the value given as argument is the zero-value of +// the type of v. +func IsZero(v interface{}) bool { + return IsZeroValue(reflect.ValueOf(v)) +} + +func IsZeroValue(v reflect.Value) bool { + if !v.IsValid() { + return true // nil interface{} + } + switch v.Kind() { + case reflect.Map, reflect.Slice, reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func: + return v.IsNil() + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.Len() == 0 + case reflect.UnsafePointer: + return unsafe.Pointer(v.Pointer()) == nil + case reflect.Array: + return isZeroArray(v) + case reflect.Struct: + return isZeroStruct(v) + } + return false +} + +func isZeroArray(v reflect.Value) bool { + for i, n := 0, v.Len(); i != n; i++ { + if !IsZeroValue(v.Index(i)) { + return false + } + } + return true +} + +func isZeroStruct(v reflect.Value) bool { + for i, n 
:= 0, v.NumField(); i != n; i++ { + if !IsZeroValue(v.Field(i)) { + return false + } + } + return true +} diff --git a/vendor/github.com/segmentio/objconv/parse.go b/vendor/github.com/segmentio/objconv/parse.go new file mode 100644 index 0000000..ca377aa --- /dev/null +++ b/vendor/github.com/segmentio/objconv/parse.go @@ -0,0 +1,138 @@ +package objconv + +import "time" + +// The Parser interface must be implemented by types that provide decoding of a +// specific format (like json, resp, ...). +// +// Parsers are not expected to be safe for use by multiple goroutines. +type Parser interface { + // ParseType is called by a decoder to ask the parser what is the type of + // the next value that can be parsed. + // + // ParseType must be idempotent, it must be possible to call it multiple + // without actually changing the state of the parser. + ParseType() (Type, error) + + // ParseNil parses a nil value. + ParseNil() error + + // ParseBool parses a boolean value. + ParseBool() (bool, error) + + // ParseInt parses an integer value. + ParseInt() (int64, error) + + // ParseUint parses an unsigned integer value. + ParseUint() (uint64, error) + + // ParseFloat parses a floating point value. + ParseFloat() (float64, error) + + // ParseString parses a string value. + // + // The string is returned as a byte slice because it is expected to be + // pointing at an internal memory buffer, the decoder will make a copy of + // the value. This design allows more memory allocation optimizations. + ParseString() ([]byte, error) + + // ParseBytes parses a byte array value. + // + // The returned byte slice is expected to be pointing at an internal memory + // buffer, the decoder will make a copy of the value. This design allows more + // memory allocation optimizations. + ParseBytes() ([]byte, error) + + // ParseTime parses a time value. + ParseTime() (time.Time, error) + + // ParseDuration parses a duration value. 
+ ParseDuration() (time.Duration, error) + + // ParseError parses an error value. + ParseError() (error, error) + + // ParseArrayBegin is called by the array-decoding algorithm when it starts. + // + // The method should return the length of the array being decoded, or a + // negative value if it is unknown (some formats like json don't keep track + // of the length of the array). + ParseArrayBegin() (int, error) + + // ParseArrayEnd is called by the array-decoding algorithm when it + // completes. + // + // The method receives the iteration counter as argument, which indicates + // how many values were decoded from the array. + ParseArrayEnd(int) error + + // ParseArrayNext is called by the array-decoding algorithm between each + // value parsed in the array. + // + // The method receives the iteration counter as argument, which indicates + // how many values were decoded from the array. + // + // If the ParseArrayBegin method returned a negative value this method + // should return objconv.End to indicated that there is no more elements to + // parse in the array. In this case the method is also called right before + // decoding the first element ot handle the case where the array is empty + // and the end-of-array marker can be read right away. + ParseArrayNext(int) error + + // ParseMapBegin is called by the map-decoding algorithm when it starts. + // + // The method should return the length of the map being decoded, or a + // negative value if it is unknown (some formats like json don't keep track + // of the length of the map). + ParseMapBegin() (int, error) + + // ParseMapEnd is called by the map-decoding algorithm when it completes. + // + // The method receives the iteration counter as argument, which indicates + // how many values were decoded from the map. + ParseMapEnd(int) error + + // ParseMapValue is called by the map-decoding algorithm after parsing a key + // but before parsing the associated value. 
+ // + // The method receives the iteration counter as argument, which indicates + // how many values were decoded from the map. + ParseMapValue(int) error + + // ParseMapNext is called by the map-decoding algorithm between each + // value parsed in the map. + // + // The method receives the iteration counter as argument, which indicates + // how many values were decoded from the map. + // + // If the ParseMapBegin method returned a negative value this method should + // return objconv.End to indicated that there is no more elements to parse + // in the map. In this case the method is also called right before decoding + // the first element ot handle the case where the array is empty and the + // end-of-map marker can be read right away. + ParseMapNext(int) error +} + +// The bytesDecoder interface may optionnaly be implemented by a Parser to +// provide an extra step in decoding a byte slice. This is sometimes necessary +// if the associated Emitter has transformed bytes slices because the format is +// not capable of representing binary data. +type bytesDecoder interface { + // DecodeBytes is called when the destination variable for a string or a + // byte slice is a byte slice, allowing the parser to apply a transformation + // before the value is stored. + DecodeBytes([]byte) ([]byte, error) +} + +// The textParser interface may be implemented by parsers of human-readable +// formats. Such parsers instruct the encoder to prefer using +// encoding.TextUnmarshaler over encoding.BinaryUnmarshaler for example. +type textParser interface { + // EmitsText returns true if the parser produces a human-readable format. 
+ TextParser() bool +} + +func isTextParser(parser Parser) bool { + p, _ := parser.(textParser) + return p != nil && p.TextParser() +} diff --git a/vendor/github.com/segmentio/objconv/sort.go b/vendor/github.com/segmentio/objconv/sort.go new file mode 100644 index 0000000..20b012d --- /dev/null +++ b/vendor/github.com/segmentio/objconv/sort.go @@ -0,0 +1,64 @@ +package objconv + +import ( + "bytes" + "reflect" + "sort" +) + +type sortIntValues []reflect.Value + +func (s sortIntValues) Len() int { return len(s) } +func (s sortIntValues) Swap(i int, j int) { s[i], s[j] = s[j], s[i] } +func (s sortIntValues) Less(i int, j int) bool { return s[i].Int() < s[j].Int() } + +type sortUintValues []reflect.Value + +func (s sortUintValues) Len() int { return len(s) } +func (s sortUintValues) Swap(i int, j int) { s[i], s[j] = s[j], s[i] } +func (s sortUintValues) Less(i int, j int) bool { return s[i].Uint() < s[j].Uint() } + +type sortFloatValues []reflect.Value + +func (s sortFloatValues) Len() int { return len(s) } +func (s sortFloatValues) Swap(i int, j int) { s[i], s[j] = s[j], s[i] } +func (s sortFloatValues) Less(i int, j int) bool { return s[i].Float() < s[j].Float() } + +type sortStringValues []reflect.Value + +func (s sortStringValues) Len() int { return len(s) } +func (s sortStringValues) Swap(i int, j int) { s[i], s[j] = s[j], s[i] } +func (s sortStringValues) Less(i int, j int) bool { return s[i].String() < s[j].String() } + +type sortBytesValues []reflect.Value + +func (s sortBytesValues) Len() int { return len(s) } +func (s sortBytesValues) Swap(i int, j int) { s[i], s[j] = s[j], s[i] } +func (s sortBytesValues) Less(i int, j int) bool { + return bytes.Compare(s[i].Bytes(), s[j].Bytes()) < 0 +} + +func sortValues(typ reflect.Type, v []reflect.Value) { + switch typ.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + sort.Sort(sortIntValues(v)) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, 
reflect.Uintptr: + sort.Sort(sortUintValues(v)) + + case reflect.Float32, reflect.Float64: + sort.Sort(sortFloatValues(v)) + + case reflect.String: + sort.Sort(sortStringValues(v)) + + case reflect.Slice: + if typ.Elem().Kind() == reflect.Uint8 { + sort.Sort(sortBytesValues(v)) + } + } + + // For all other types we give up on trying to sort the values, + // anyway it's likely not gonna be a serializable type, or something + // that doesn't make sense. +} diff --git a/vendor/github.com/segmentio/objconv/struct.go b/vendor/github.com/segmentio/objconv/struct.go new file mode 100644 index 0000000..8300502 --- /dev/null +++ b/vendor/github.com/segmentio/objconv/struct.go @@ -0,0 +1,165 @@ +package objconv + +import ( + "reflect" + "sync" + + "github.com/segmentio/objconv/objutil" +) + +// structField represents a single field of a struct and carries information +// useful to the algorithms of the objconv package. +type structField struct { + // The index of the field in the structure. + index []int + + // The name of the field in the structure. + name string + + // Omitempty is set to true when the field should be omitted if it has an + // empty value. + omitempty bool + + // Omitzero is set to true when the field should be omitted if it has a zero + // value. + omitzero bool + + // cache for the encoder and decoder methods + encode encodeFunc + decode decodeFunc +} + +func makeStructField(f reflect.StructField, c map[reflect.Type]*structType) structField { + var t objutil.Tag + + if tag := f.Tag.Get("objconv"); len(tag) != 0 { + t = objutil.ParseTag(tag) + } else { + // To maximize compatibility with existing code we fallback to checking + // if the field has a `json` tag. + // + // This tag doesn't support any of the extra features that are supported + // by the `objconv` tag, and it should stay this way. It has to match + // the behavior of the standard encoding/json package to avoid any + // implicit changes in what would be intuitively expected. 
+ t = objutil.ParseTagJSON(f.Tag.Get("json")) + } + + s := structField{ + index: f.Index, + name: f.Name, + omitempty: t.Omitempty, + omitzero: t.Omitzero, + + encode: makeEncodeFunc(f.Type, encodeFuncOpts{ + recurse: true, + structs: c, + }), + + decode: makeDecodeFunc(f.Type, decodeFuncOpts{ + recurse: true, + structs: c, + }), + } + + if len(t.Name) != 0 { + s.name = t.Name + } + + return s +} + +func (f *structField) omit(v reflect.Value) bool { + return (f.omitempty && objutil.IsEmptyValue(v)) || (f.omitzero && objutil.IsZeroValue(v)) +} + +// structType is used to represent a Go structure in internal data structures +// that cache meta information to make field lookups faster and avoid having to +// use reflection to lookup the same type information over and over again. +type structType struct { + fields []structField // the serializable fields of the struct + fieldsByName map[string]*structField // cache of fields by name +} + +// newStructType takes a Go type as argument and extract information to make a +// new structType value. +// The type has to be a struct type or a panic will be raised. +func newStructType(t reflect.Type, c map[reflect.Type]*structType) *structType { + if s := c[t]; s != nil { + return s + } + + n := t.NumField() + s := &structType{ + fields: make([]structField, 0, n), + fieldsByName: make(map[string]*structField), + } + c[t] = s + + for i := 0; i != n; i++ { + ft := t.Field(i) + + if ft.Anonymous || len(ft.PkgPath) != 0 { // anonymous or non-exported + continue + } + + sf := makeStructField(ft, c) + + if sf.name == "-" { // skip + continue + } + + s.fields = append(s.fields, sf) + s.fieldsByName[sf.name] = &s.fields[len(s.fields)-1] + } + + return s +} + +// structTypeCache is a simple cache for mapping Go types to Struct values. 
+type structTypeCache struct { + mutex sync.RWMutex + store map[reflect.Type]*structType +} + +// lookup takes a Go type as argument and returns the matching structType value, +// potentially creating it if it didn't already exist. +// This method is safe to call from multiple goroutines. +func (cache *structTypeCache) lookup(t reflect.Type) (s *structType) { + cache.mutex.RLock() + s = cache.store[t] + cache.mutex.RUnlock() + + if s == nil { + // There's a race confition here where this value may be generated + // multiple times. + // The impact in practice is really small as it's unlikely to happen + // often, we take the approach of keeping the logic simple and avoid + // a more complex synchronization logic required to solve this edge + // case. + s = newStructType(t, map[reflect.Type]*structType{}) + cache.mutex.Lock() + cache.store[t] = s + cache.mutex.Unlock() + } + + return +} + +// clear empties the cache. +func (cache *structTypeCache) clear() { + cache.mutex.Lock() + for typ := range cache.store { + delete(cache.store, typ) + } + cache.mutex.Unlock() +} + +var ( + // This struct cache is used to avoid reusing reflection over and over when + // the objconv functions are called. The performance improvements on iterating + // over struct fields are huge, this is a really important optimization: + structCache = structTypeCache{ + store: make(map[reflect.Type]*structType), + } +) diff --git a/vendor/github.com/segmentio/objconv/value.go b/vendor/github.com/segmentio/objconv/value.go new file mode 100644 index 0000000..fe1c55c --- /dev/null +++ b/vendor/github.com/segmentio/objconv/value.go @@ -0,0 +1,492 @@ +package objconv + +import ( + "encoding" + "errors" + "reflect" + "sync" + "time" + "unsafe" +) + +// Type is an enumeration that represent all the base types supported by the +// emitters and parsers. 
+type Type int + +const ( + Unknown Type = iota + Nil + Bool + Int + Uint + Float + String + Bytes + Time + Duration + Error + Array + Map +) + +// String returns a human readable representation of the type. +func (t Type) String() string { + switch t { + case Nil: + return "nil" + case Bool: + return "bool" + case Int: + return "int" + case Uint: + return "uint" + case Float: + return "float" + case String: + return "string" + case Bytes: + return "bytes" + case Time: + return "time" + case Duration: + return "duration" + case Error: + return "error" + case Array: + return "array" + case Map: + return "map" + default: + return "" + } +} + +var ( + zeroCache = make(map[reflect.Type]reflect.Value) + zeroMutex sync.RWMutex +) + +// zeroValueOf and the related cache is used to keep the zero values so they +// don't need to be reallocated every time they're used. +func zeroValueOf(t reflect.Type) reflect.Value { + zeroMutex.RLock() + v, ok := zeroCache[t] + zeroMutex.RUnlock() + + if !ok { + v = reflect.Zero(t) + zeroMutex.Lock() + zeroCache[t] = v + zeroMutex.Unlock() + } + + return v +} + +var ( + // basic types + boolType = reflect.TypeOf(false) + intType = reflect.TypeOf(int(0)) + int8Type = reflect.TypeOf(int8(0)) + int16Type = reflect.TypeOf(int16(0)) + int32Type = reflect.TypeOf(int32(0)) + int64Type = reflect.TypeOf(int64(0)) + uintType = reflect.TypeOf(uint(0)) + uint8Type = reflect.TypeOf(uint8(0)) + uint16Type = reflect.TypeOf(uint16(0)) + uint32Type = reflect.TypeOf(uint32(0)) + uint64Type = reflect.TypeOf(uint64(0)) + uintptrType = reflect.TypeOf(uintptr(0)) + float32Type = reflect.TypeOf(float32(0)) + float64Type = reflect.TypeOf(float64(0)) + stringType = reflect.TypeOf("") + bytesType = reflect.TypeOf([]byte(nil)) + timeType = reflect.TypeOf(time.Time{}) + durationType = reflect.TypeOf(time.Duration(0)) + sliceInterfaceType = reflect.TypeOf(([]interface{})(nil)) + timePtrType = reflect.PtrTo(timeType) + + // interfaces + errorInterface = 
elemTypeOf((*error)(nil)) + valueEncoderInterface = elemTypeOf((*ValueEncoder)(nil)) + valueDecoderInterface = elemTypeOf((*ValueDecoder)(nil)) + binaryMarshalerInterface = elemTypeOf((*encoding.BinaryMarshaler)(nil)) + binaryUnmarshalerInterface = elemTypeOf((*encoding.BinaryUnmarshaler)(nil)) + textMarshalerInterface = elemTypeOf((*encoding.TextMarshaler)(nil)) + textUnmarshalerInterface = elemTypeOf((*encoding.TextUnmarshaler)(nil)) + emptyInterface = elemTypeOf((*interface{})(nil)) + + // common map types, used for optimization for map encoding algorithms + mapStringStringType = reflect.TypeOf((map[string]string)(nil)) + mapStringInterfaceType = reflect.TypeOf((map[string]interface{})(nil)) + mapInterfaceInterfaceType = reflect.TypeOf((map[interface{}]interface{})(nil)) +) + +func elemTypeOf(v interface{}) reflect.Type { + return reflect.TypeOf(v).Elem() +} + +func stringNoCopy(b []byte) string { + n := len(b) + if n == 0 { + return "" + } + return *(*string)(unsafe.Pointer(&reflect.StringHeader{ + Data: uintptr(unsafe.Pointer(&b[0])), + Len: n, + })) +} + +// ValueParser is parser that uses "natural" in-memory representation of data +// structures. +// +// This is mainly useful for testing the decoder algorithms. +type ValueParser struct { + stack []reflect.Value + ctx []valueParserContext +} + +type valueParserContext struct { + value reflect.Value + keys []reflect.Value + fields []structField +} + +// NewValueParser creates a new parser that exposes the value v. 
+func NewValueParser(v interface{}) *ValueParser { + return &ValueParser{ + stack: []reflect.Value{reflect.ValueOf(v)}, + } +} + +func (p *ValueParser) ParseType() (Type, error) { + v := p.value() + + if !v.IsValid() { + return Nil, nil + } + + switch v.Interface().(type) { + case time.Time: + return Time, nil + + case time.Duration: + return Duration, nil + + case error: + return Error, nil + } + + switch v.Kind() { + case reflect.Bool: + return Bool, nil + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return Int, nil + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return Uint, nil + + case reflect.Float32, reflect.Float64: + return Float, nil + + case reflect.String: + return String, nil + + case reflect.Slice: + if v.Type().Elem().Kind() == reflect.Uint8 { + return Bytes, nil + } + return Array, nil + + case reflect.Array: + return Array, nil + + case reflect.Map: + return Map, nil + + case reflect.Struct: + return Map, nil + + case reflect.Interface: + if v.IsNil() { + return Nil, nil + } + } + + return Nil, errors.New("objconv: unsupported type found in value parser: " + v.Type().String()) +} + +func (p *ValueParser) ParseNil() (err error) { + return +} + +func (p *ValueParser) ParseBool() (v bool, err error) { + v = p.value().Bool() + return +} + +func (p *ValueParser) ParseInt() (v int64, err error) { + v = p.value().Int() + return +} + +func (p *ValueParser) ParseUint() (v uint64, err error) { + v = p.value().Uint() + return +} + +func (p *ValueParser) ParseFloat() (v float64, err error) { + v = p.value().Float() + return +} + +func (p *ValueParser) ParseString() (v []byte, err error) { + v = []byte(p.value().String()) + return +} + +func (p *ValueParser) ParseBytes() (v []byte, err error) { + v = p.value().Bytes() + return +} + +func (p *ValueParser) ParseTime() (v time.Time, err error) { + v = p.value().Interface().(time.Time) + return +} + +func (p *ValueParser) 
ParseDuration() (v time.Duration, err error) { + v = p.value().Interface().(time.Duration) + return +} + +func (p *ValueParser) ParseError() (v error, err error) { + v = p.value().Interface().(error) + return +} + +func (p *ValueParser) ParseArrayBegin() (n int, err error) { + v := p.value() + n = v.Len() + p.pushContext(valueParserContext{value: v}) + + if n != 0 { + p.push(v.Index(0)) + } + + return +} + +func (p *ValueParser) ParseArrayEnd(n int) (err error) { + if n != 0 { + p.pop() + } + p.popContext() + return +} + +func (p *ValueParser) ParseArrayNext(n int) (err error) { + ctx := p.context() + p.pop() + p.push(ctx.value.Index(n)) + return +} + +func (p *ValueParser) ParseMapBegin() (n int, err error) { + v := p.value() + + if v.Kind() == reflect.Map { + n = v.Len() + k := v.MapKeys() + p.pushContext(valueParserContext{value: v, keys: k}) + if n != 0 { + p.push(k[0]) + } + } else { + c := valueParserContext{value: v} + s := structCache.lookup(v.Type()) + + for _, f := range s.fields { + if !f.omit(v.FieldByIndex(f.index)) { + c.fields = append(c.fields, f) + n++ + } + } + + p.pushContext(c) + if n != 0 { + p.push(reflect.ValueOf(c.fields[0].name)) + } + } + + return +} + +func (p *ValueParser) ParseMapEnd(n int) (err error) { + if n != 0 { + p.pop() + } + p.popContext() + return +} + +func (p *ValueParser) ParseMapValue(n int) (err error) { + ctx := p.context() + p.pop() + + if ctx.keys != nil { + p.push(ctx.value.MapIndex(ctx.keys[n])) + } else { + p.push(ctx.value.FieldByIndex(ctx.fields[n].index)) + } + + return +} + +func (p *ValueParser) ParseMapNext(n int) (err error) { + ctx := p.context() + p.pop() + + if ctx.keys != nil { + p.push(ctx.keys[n]) + } else { + p.push(reflect.ValueOf(ctx.fields[n].name)) + } + + return +} + +func (p *ValueParser) value() reflect.Value { + v := p.stack[len(p.stack)-1] + + if !v.IsValid() { + return v + } + + switch v.Interface().(type) { + case error: + return v + } + +dereference: + switch v.Kind() { + case 
reflect.Interface, reflect.Ptr: + if !v.IsNil() { + v = v.Elem() + goto dereference + } + } + + return v +} + +func (p *ValueParser) push(v reflect.Value) { + p.stack = append(p.stack, v) +} + +func (p *ValueParser) pop() { + p.stack = p.stack[:len(p.stack)-1] +} + +func (p *ValueParser) pushContext(ctx valueParserContext) { + p.ctx = append(p.ctx, ctx) +} + +func (p *ValueParser) popContext() { + p.ctx = p.ctx[:len(p.ctx)-1] +} + +func (p *ValueParser) context() *valueParserContext { + return &p.ctx[len(p.ctx)-1] +} + +// ValueEmitter is a special kind of emitter, instead of serializing the values +// it receives it builds an in-memory representation of the data. +// +// This is useful for testing the high-level API of the package without actually +// having to generate a serialized representation. +type ValueEmitter struct { + stack []interface{} + marks []int +} + +// NewValueEmitter returns a pointer to a new ValueEmitter object. +func NewValueEmitter() *ValueEmitter { + return &ValueEmitter{} +} + +// Value returns the value built in the emitter. 
+func (e *ValueEmitter) Value() interface{} { return e.stack[0] } + +func (e *ValueEmitter) EmitNil() error { return e.push(nil) } + +func (e *ValueEmitter) EmitBool(v bool) error { return e.push(v) } + +func (e *ValueEmitter) EmitInt(v int64, _ int) error { return e.push(v) } + +func (e *ValueEmitter) EmitUint(v uint64, _ int) error { return e.push(v) } + +func (e *ValueEmitter) EmitFloat(v float64, _ int) error { return e.push(v) } + +func (e *ValueEmitter) EmitString(v string) error { return e.push(v) } + +func (e *ValueEmitter) EmitBytes(v []byte) error { return e.push(v) } + +func (e *ValueEmitter) EmitTime(v time.Time) error { return e.push(v) } + +func (e *ValueEmitter) EmitDuration(v time.Duration) error { return e.push(v) } + +func (e *ValueEmitter) EmitError(v error) error { return e.push(v) } + +func (e *ValueEmitter) EmitArrayBegin(v int) error { return e.pushMark() } + +func (e *ValueEmitter) EmitArrayEnd() error { + v := e.pop(e.popMark()) + a := make([]interface{}, len(v)) + copy(a, v) + return e.push(a) +} + +func (e *ValueEmitter) EmitArrayNext() error { return nil } + +func (e *ValueEmitter) EmitMapBegin(v int) error { return e.pushMark() } + +func (e *ValueEmitter) EmitMapEnd() error { + v := e.pop(e.popMark()) + n := len(v) + m := make(map[interface{}]interface{}, n/2) + + for i := 0; i != n; i += 2 { + m[v[i]] = v[i+1] + } + + return e.push(m) +} + +func (e *ValueEmitter) EmitMapValue() error { return nil } + +func (e *ValueEmitter) EmitMapNext() error { return nil } + +func (e *ValueEmitter) push(v interface{}) error { + e.stack = append(e.stack, v) + return nil +} + +func (e *ValueEmitter) pop(n int) []interface{} { + v := e.stack[n:] + e.stack = e.stack[:n] + return v +} + +func (e *ValueEmitter) pushMark() error { + e.marks = append(e.marks, len(e.stack)) + return nil +} + +func (e *ValueEmitter) popMark() int { + n := len(e.marks) - 1 + m := e.marks[n] + e.marks = e.marks[:n] + return m +} diff --git 
a/vendor/github.com/segmentio/objconv/yaml/decode.go b/vendor/github.com/segmentio/objconv/yaml/decode.go new file mode 100644 index 0000000..fdee912 --- /dev/null +++ b/vendor/github.com/segmentio/objconv/yaml/decode.go @@ -0,0 +1,51 @@ +package yaml + +import ( + "bytes" + "io" + "sync" + + "github.com/segmentio/objconv" +) + +// NewDecoder returns a new YAML decoder that parses values from r. +func NewDecoder(r io.Reader) *objconv.Decoder { + return objconv.NewDecoder(NewParser(r)) +} + +// NewStreamDecoder returns a new YAML stream decoder that parses values from r. +func NewStreamDecoder(r io.Reader) *objconv.StreamDecoder { + return objconv.NewStreamDecoder(NewParser(r)) +} + +// Unmarshal decodes a YAML representation of v from b. +func Unmarshal(b []byte, v interface{}) error { + u := unmarshalerPool.Get().(*unmarshaler) + u.reset(b) + + err := (objconv.Decoder{Parser: u}).Decode(v) + + u.reset(nil) + unmarshalerPool.Put(u) + return err +} + +var unmarshalerPool = sync.Pool{ + New: func() interface{} { return newUnmarshaler() }, +} + +type unmarshaler struct { + Parser + b bytes.Buffer +} + +func newUnmarshaler() *unmarshaler { + u := &unmarshaler{} + u.r = &u.b + return u +} + +func (u *unmarshaler) reset(b []byte) { + u.b = *bytes.NewBuffer(b) + u.Reset(&u.b) +} diff --git a/vendor/github.com/segmentio/objconv/yaml/emit.go b/vendor/github.com/segmentio/objconv/yaml/emit.go new file mode 100644 index 0000000..e170e3d --- /dev/null +++ b/vendor/github.com/segmentio/objconv/yaml/emit.go @@ -0,0 +1,166 @@ +package yaml + +import ( + "encoding/base64" + "io" + "time" + + yaml "gopkg.in/yaml.v2" +) + +// Emitter implements a YAML emitter that satisfies the objconv.Emitter +// interface. +type Emitter struct { + w io.Writer + // The stack is used to keep track of the container being built by the + // emitter, which may be an arrayEmitter or mapEmitter. 
+ stack []emitter +} + +func NewEmitter(w io.Writer) *Emitter { + return &Emitter{w: w} +} + +func (e *Emitter) Reset(w io.Writer) { + e.w = w + e.stack = e.stack[:0] +} + +func (e *Emitter) EmitNil() error { + return e.emit(nil) +} + +func (e *Emitter) EmitBool(v bool) error { + return e.emit(v) +} + +func (e *Emitter) EmitInt(v int64, _ int) error { + return e.emit(v) +} + +func (e *Emitter) EmitUint(v uint64, _ int) error { + return e.emit(v) +} + +func (e *Emitter) EmitFloat(v float64, _ int) error { + return e.emit(v) +} + +func (e *Emitter) EmitString(v string) error { + return e.emit(v) +} + +func (e *Emitter) EmitBytes(v []byte) error { + return e.emit(base64.StdEncoding.EncodeToString(v)) +} + +func (e *Emitter) EmitTime(v time.Time) error { + return e.emit(v.Format(time.RFC3339Nano)) +} + +func (e *Emitter) EmitDuration(v time.Duration) error { + return e.emit(v.String()) +} + +func (e *Emitter) EmitError(v error) error { + return e.emit(v.Error()) +} + +func (e *Emitter) EmitArrayBegin(_ int) (err error) { + e.push(&arrayEmitter{}) + return +} + +func (e *Emitter) EmitArrayEnd() (err error) { + e.emit(e.pop().value()) + return +} + +func (e *Emitter) EmitArrayNext() (err error) { + return +} + +func (e *Emitter) EmitMapBegin(_ int) (err error) { + e.push(&mapEmitter{}) + return +} + +func (e *Emitter) EmitMapEnd() (err error) { + e.emit(e.pop().value()) + return +} + +func (e *Emitter) EmitMapValue() (err error) { + return +} + +func (e *Emitter) EmitMapNext() (err error) { + return +} + +func (e *Emitter) TextEmitter() bool { + return true +} + +func (e *Emitter) emit(v interface{}) (err error) { + var b []byte + + if n := len(e.stack); n != 0 { + e.stack[n-1].emit(v) + return + } + + if b, err = yaml.Marshal(v); err != nil { + return + } + + _, err = e.w.Write(b) + return +} + +func (e *Emitter) push(v emitter) { + e.stack = append(e.stack, v) +} + +func (e *Emitter) pop() emitter { + i := len(e.stack) - 1 + v := e.stack[i] + e.stack = e.stack[:i] + 
return v +} + +type emitter interface { + emit(interface{}) + value() interface{} +} + +type arrayEmitter struct { + self []interface{} +} + +func (e *arrayEmitter) emit(v interface{}) { + e.self = append(e.self, v) +} + +func (e *arrayEmitter) value() interface{} { + return e.self +} + +type mapEmitter struct { + self yaml.MapSlice + val bool +} + +func (e *mapEmitter) emit(v interface{}) { + if e.val { + e.val = false + e.self[len(e.self)-1].Value = v + } else { + e.val = true + e.self = append(e.self, yaml.MapItem{Key: v}) + } +} + +func (e *mapEmitter) value() interface{} { + return e.self +} diff --git a/vendor/github.com/segmentio/objconv/yaml/encode.go b/vendor/github.com/segmentio/objconv/yaml/encode.go new file mode 100644 index 0000000..c5c9bed --- /dev/null +++ b/vendor/github.com/segmentio/objconv/yaml/encode.go @@ -0,0 +1,48 @@ +package yaml + +import ( + "bytes" + "io" + "sync" + + "github.com/segmentio/objconv" +) + +// NewEncoder returns a new YAML encoder that writes to w. +func NewEncoder(w io.Writer) *objconv.Encoder { + return objconv.NewEncoder(NewEmitter(w)) +} + +// NewStreamEncoder returns a new YAML stream encoder that writes to w. +func NewStreamEncoder(w io.Writer) *objconv.StreamEncoder { + return objconv.NewStreamEncoder(NewEmitter(w)) +} + +// Marshal writes the YAML representation of v to a byte slice returned in b. 
+func Marshal(v interface{}) (b []byte, err error) { + m := marshalerPool.Get().(*marshaler) + m.b.Truncate(0) + + if err = (objconv.Encoder{Emitter: m}).Encode(v); err == nil { + b = make([]byte, m.b.Len()) + copy(b, m.b.Bytes()) + } + + marshalerPool.Put(m) + return +} + +var marshalerPool = sync.Pool{ + New: func() interface{} { return newMarshaler() }, +} + +type marshaler struct { + Emitter + b bytes.Buffer +} + +func newMarshaler() *marshaler { + m := &marshaler{} + m.w = &m.b + return m +} diff --git a/vendor/github.com/segmentio/objconv/yaml/init.go b/vendor/github.com/segmentio/objconv/yaml/init.go new file mode 100644 index 0000000..470cc5f --- /dev/null +++ b/vendor/github.com/segmentio/objconv/yaml/init.go @@ -0,0 +1,23 @@ +package yaml + +import ( + "io" + + "github.com/segmentio/objconv" +) + +// Codec for the YAML format. +var Codec = objconv.Codec{ + NewEmitter: func(w io.Writer) objconv.Emitter { return NewEmitter(w) }, + NewParser: func(r io.Reader) objconv.Parser { return NewParser(r) }, +} + +func init() { + for _, name := range [...]string{ + "application/yaml", + "text/yaml", + "yaml", + } { + objconv.Register(name, Codec) + } +} diff --git a/vendor/github.com/segmentio/objconv/yaml/parse.go b/vendor/github.com/segmentio/objconv/yaml/parse.go new file mode 100644 index 0000000..481bc58 --- /dev/null +++ b/vendor/github.com/segmentio/objconv/yaml/parse.go @@ -0,0 +1,316 @@ +package yaml + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "time" + + yaml "gopkg.in/yaml.v2" + + "github.com/segmentio/objconv" +) + +type Parser struct { + r io.Reader // reader to load bytes from + s []byte // string buffer + // This stack is used to iterate over the arrays and maps that get loaded in + // the value field. 
+ stack []parser +} + +func NewParser(r io.Reader) *Parser { + return &Parser{r: r} +} + +func (p *Parser) Reset(r io.Reader) { + p.r = r + p.s = nil + p.stack = nil +} + +func (p *Parser) Buffered() io.Reader { + return bytes.NewReader(nil) +} + +func (p *Parser) ParseType() (typ objconv.Type, err error) { + if p.stack == nil { + var b []byte + var v interface{} + + if b, err = ioutil.ReadAll(p.r); err != nil { + return + } + if err = yaml.Unmarshal(b, &v); err != nil { + return + } + p.push(newParser(v)) + } + + switch v := p.value(); v.(type) { + case nil: + typ = objconv.Nil + + case bool: + typ = objconv.Bool + + case int, int64: + typ = objconv.Int + + case uint64: + typ = objconv.Uint + + case float64: + typ = objconv.Float + + case string: + typ = objconv.String + + case yaml.MapSlice: + typ = objconv.Map + + case []interface{}: + typ = objconv.Array + + case eof: + err = io.EOF + + default: + err = fmt.Errorf("objconv/yaml: gopkg.in/yaml.v2 generated an unsupported value of type %T", v) + } + + return +} + +func (p *Parser) ParseNil() (err error) { + p.pop() + return +} + +func (p *Parser) ParseBool() (v bool, err error) { + v = p.pop().value().(bool) + return +} + +func (p *Parser) ParseInt() (v int64, err error) { + switch x := p.pop().value().(type) { + case int: + v = int64(x) + default: + v = x.(int64) + } + return +} + +func (p *Parser) ParseUint() (v uint64, err error) { + v = p.pop().value().(uint64) + return +} + +func (p *Parser) ParseFloat() (v float64, err error) { + v = p.pop().value().(float64) + return +} + +func (p *Parser) ParseString() (v []byte, err error) { + s := p.pop().value().(string) + n := len(s) + + if cap(p.s) < n { + p.s = make([]byte, 0, ((n/1024)+1)*1024) + } + + v = p.s[:n] + copy(v, s) + return +} + +func (p *Parser) ParseBytes() (v []byte, err error) { + panic("objconv/yaml: ParseBytes should never be called because YAML has no bytes type, this is likely a bug in the decoder code") +} + +func (p *Parser) ParseTime() (v 
time.Time, err error) { + panic("objconv/yaml: ParseBytes should never be called because YAML has no time type, this is likely a bug in the decoder code") +} + +func (p *Parser) ParseDuration() (v time.Duration, err error) { + panic("objconv/yaml: ParseDuration should never be called because YAML has no duration type, this is likely a bug in the decoder code") +} + +func (p *Parser) ParseError() (v error, err error) { + panic("objconv/yaml: ParseError should never be called because YAML has no error type, this is likely a bug in the decoder code") +} + +func (p *Parser) ParseArrayBegin() (n int, err error) { + if n = p.top().len(); n != 0 { + p.push(newParser(p.top().next())) + } + return +} + +func (p *Parser) ParseArrayEnd(n int) (err error) { + p.pop() + return +} + +func (p *Parser) ParseArrayNext(n int) (err error) { + p.push(newParser(p.top().next())) + return +} + +func (p *Parser) ParseMapBegin() (n int, err error) { + if n = p.top().len(); n != 0 { + p.push(newParser(p.top().next())) + } + return +} + +func (p *Parser) ParseMapEnd(n int) (err error) { + p.pop() + return +} + +func (p *Parser) ParseMapValue(n int) (err error) { + p.push(newParser(p.top().next())) + return +} + +func (p *Parser) ParseMapNext(n int) (err error) { + p.push(newParser(p.top().next())) + return +} + +func (p *Parser) TextParser() bool { + return true +} + +func (p *Parser) DecodeBytes(b []byte) (v []byte, err error) { + var n int + if n, err = base64.StdEncoding.Decode(b, b); err != nil { + return + } + v = b[:n] + return +} + +func (p *Parser) push(v parser) { + p.stack = append(p.stack, v) +} + +func (p *Parser) pop() parser { + i := len(p.stack) - 1 + v := p.stack[i] + p.stack = p.stack[:i] + return v +} + +func (p *Parser) top() parser { + return p.stack[len(p.stack)-1] +} + +func (p *Parser) value() interface{} { + n := len(p.stack) + if n == 0 { + return eof{} + } + return p.stack[n-1].value() +} + +type parser interface { + value() interface{} + next() interface{} + len() 
int +} + +type valueParser struct { + self interface{} +} + +func (p *valueParser) value() interface{} { + return p.self +} + +func (p *valueParser) next() interface{} { + panic("objconv/yaml: invalid call of next method on simple value parser") +} + +func (p *valueParser) len() int { + panic("objconv/yaml: invalid call of len method on simple value parser") +} + +type arrayParser struct { + self []interface{} + off int +} + +func (p *arrayParser) value() interface{} { + return p.self +} + +func (p *arrayParser) next() interface{} { + v := p.self[p.off] + p.off++ + return v +} + +func (p *arrayParser) len() int { + return len(p.self) +} + +type mapParser struct { + self yaml.MapSlice + off int + val bool +} + +func (p *mapParser) value() interface{} { + return p.self +} + +func (p *mapParser) next() (v interface{}) { + if p.val { + v = p.self[p.off].Value + p.val = false + p.off++ + } else { + v = p.self[p.off].Key + p.val = true + } + return +} + +func (p *mapParser) len() int { + return len(p.self) +} + +func newParser(v interface{}) parser { + switch x := v.(type) { + case map[interface{}]interface{}: + return &mapParser{self: makeMapSlice(x)} + + case []interface{}: + return &arrayParser{self: x} + + default: + return &valueParser{self: x} + } +} + +func makeMapSlice(m map[interface{}]interface{}) yaml.MapSlice { + s := make(yaml.MapSlice, 0, len(m)) + + for k, v := range m { + s = append(s, yaml.MapItem{ + Key: k, + Value: v, + }) + } + + return s +} + +// eof values are returned by the top method to indicate that all values have +// already been consumed. +type eof struct{} diff --git a/vendor/github.com/xtgo/uuid/AUTHORS b/vendor/github.com/xtgo/uuid/AUTHORS deleted file mode 100644 index a6f0451..0000000 --- a/vendor/github.com/xtgo/uuid/AUTHORS +++ /dev/null @@ -1,5 +0,0 @@ -# This source file refers to The gocql Authors for copyright purposes. 
- -Christoph Hack -Jonathan Rudenberg -Thorsten von Eicken diff --git a/vendor/gopkg.in/go-playground/mold.v2/LICENSE b/vendor/gopkg.in/go-playground/mold.v2/LICENSE new file mode 100644 index 0000000..12f1fe2 --- /dev/null +++ b/vendor/gopkg.in/go-playground/mold.v2/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2017 Dean Karn + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/vendor/gopkg.in/go-playground/mold.v2/README.md b/vendor/gopkg.in/go-playground/mold.v2/README.md new file mode 100644 index 0000000..3169936 --- /dev/null +++ b/vendor/gopkg.in/go-playground/mold.v2/README.md @@ -0,0 +1,209 @@ +Package mold +============ +![Project status](https://img.shields.io/badge/version-2.2.0-green.svg) +[![Build Status](https://travis-ci.org/go-playground/mold.svg?branch=v2)](https://travis-ci.org/go-playground/mold) +[![Coverage Status](https://coveralls.io/repos/github/go-playground/mold/badge.svg?branch=v2)](https://coveralls.io/github/go-playground/mold?branch=v2) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/mold)](https://goreportcard.com/report/github.com/go-playground/mold) +[![GoDoc](https://godoc.org/gopkg.in/go-playground/mold.v2?status.svg)](https://godoc.org/gopkg.in/go-playground/mold.v2) +![License](https://img.shields.io/dub/l/vibe-d.svg) + +Package mold is a general library to help modify or set data within data structures and other objects. + +How can this help me you ask, please see the examples [here](_examples/full/main.go) + +Installation +------------ + +Use go get. +```shell +go get -u gopkg.in/go-playground/mold.v2 +``` + +Then import the form package into your own code. 
+ + import "gopkg.in/go-playground/mold.v2" + +Simple example +----- +```go +package main + +import ( + "context" + "fmt" + "log" + "reflect" + + "gopkg.in/go-playground/mold.v2" +) + +var tform *mold.Transformer + +func main() { + tform = mold.New() + tform.Register("set", transformMyData) + + type Test struct { + String string `mold:"set"` + } + + var tt Test + + err := tform.Struct(context.Background(), &tt) + if err != nil { + log.Fatal(err) + } + fmt.Printf("%+v\n", tt) + + var myString string + err = tform.Field(context.Background(), &myString, "set") + if err != nil { + log.Fatal(err) + } + fmt.Println(myString) +} + +func transformMyData(ctx context.Context, t *mold.Transformer, value reflect.Value, param string) error { + value.SetString("test") + return nil +} +``` + +Full example +----- +```go +package main + +import ( + "context" + "fmt" + "log" + "net/url" + + "github.com/go-playground/form" + + "gopkg.in/go-playground/mold.v2/modifiers" + "gopkg.in/go-playground/mold.v2/scrubbers" + + "gopkg.in/go-playground/validator.v9" +) + +// This example is centered around a form post, but doesn't have to be +// just trying to give a well rounded real life example. + +//
+// +// +// +// +// +// +// +// +// +//
+ +var ( + conform = modifiers.New() + scrub = scrubbers.New() + validate = validator.New() + decoder = form.NewDecoder() +) + +// Address contains address information +type Address struct { + Name string `mod:"trim" validate:"required"` + Phone string `mod:"trim" validate:"required"` +} + +// User contains user information +type User struct { + Name string `mod:"trim" validate:"required" scrub:"name"` + Age uint8 ` validate:"required,gt=0,lt=130"` + Gender string ` validate:"required"` + Email string `mod:"trim" validate:"required,email" scrub:"emails"` + Address []Address ` validate:"required,dive"` + Active bool `form:"active"` +} + +func main() { + // this simulates the results of http.Request's ParseForm() function + values := parseForm() + + var user User + + // must pass a pointer + err := decoder.Decode(&user, values) + if err != nil { + log.Panic(err) + } + fmt.Printf("Decoded:%+v\n\n", user) + + // great not lets conform our values, after all a human input the data + // nobody's perfect + err = conform.Struct(context.Background(), &user) + if err != nil { + log.Panic(err) + } + fmt.Printf("Conformed:%+v\n\n", user) + + // that's better all those extra spaces are gone + // let's validate the data + err = validate.Struct(user) + if err != nil { + log.Panic(err) + } + + // ok now we know our data is good, let's do something with it like: + // save to database + // process request + // etc.... 
+ + // ok now I'm done working with my data + // let's log or store it somewhere + // oh wait a minute, we have some sensitive PII data + // let's make sure that's de-identified first + err = scrub.Struct(context.Background(), &user) + if err != nil { + log.Panic(err) + } + fmt.Printf("Scrubbed:%+v\n\n", user) +} + +// this simulates the results of http.Request's ParseForm() function +func parseForm() url.Values { + return url.Values{ + "Name": []string{" joeybloggs "}, + "Age": []string{"3"}, + "Gender": []string{"Male"}, + "Email": []string{"Dean.Karn@gmail.com "}, + "Address[0].Name": []string{"26 Here Blvd."}, + "Address[0].Phone": []string{"9(999)999-9999"}, + "Address[1].Name": []string{"26 There Blvd."}, + "Address[1].Phone": []string{"1(111)111-1111"}, + "active": []string{"true"}, + } +} +``` + +Special Information +------------------- +- To use a comma(,) within your params replace use it's hex representation instead '0x2C' which will be replaced while caching. + +Contributing +------------ +I am definitly interested in the communities help in adding more scrubbers and modifiers. +Please send a PR with tests, and prefereably no extra dependencies, at lease until a solid base +has been built. + +Complimentary Software +---------------------- + +Here is a list of software that compliments using this library post decoding. + +* [validator](https://github.com/go-playground/validator) - Go Struct and Field validation, including Cross Field, Cross Struct, Map, Slice and Array diving. +* [form](https://github.com/go-playground/form) - Decodes url.Values into Go value(s) and Encodes Go value(s) into url.Values. Dual Array and Full map support. + +License +------ +Distributed under MIT License, please see license file in code for more details. 
diff --git a/vendor/gopkg.in/go-playground/mold.v2/cache.go b/vendor/gopkg.in/go-playground/mold.v2/cache.go new file mode 100644 index 0000000..38957dc --- /dev/null +++ b/vendor/gopkg.in/go-playground/mold.v2/cache.go @@ -0,0 +1,266 @@ +package mold + +import ( + "reflect" + "strings" + "sync" + "sync/atomic" +) + +type tagType uint8 + +const ( + typeDefault tagType = iota + typeDive + typeKeys + typeEndKeys +) + +const ( + keysTagNotDefined = "'" + endKeysTag + "' tag encountered without a corresponding '" + keysTag + "' tag" +) + +type structCache struct { + lock sync.Mutex + m atomic.Value // map[reflect.Type]*cStruct +} + +func (sc *structCache) Get(key reflect.Type) (c *cStruct, found bool) { + c, found = sc.m.Load().(map[reflect.Type]*cStruct)[key] + return +} + +func (sc *structCache) Set(key reflect.Type, value *cStruct) { + + m := sc.m.Load().(map[reflect.Type]*cStruct) + + nm := make(map[reflect.Type]*cStruct, len(m)+1) + for k, v := range m { + nm[k] = v + } + nm[key] = value + sc.m.Store(nm) +} + +type tagCache struct { + lock sync.Mutex + m atomic.Value // map[string]*cTag +} + +func (tc *tagCache) Get(key string) (c *cTag, found bool) { + c, found = tc.m.Load().(map[string]*cTag)[key] + return +} + +func (tc *tagCache) Set(key string, value *cTag) { + + m := tc.m.Load().(map[string]*cTag) + + nm := make(map[string]*cTag, len(m)+1) + for k, v := range m { + nm[k] = v + } + nm[key] = value + tc.m.Store(nm) +} + +type cStruct struct { + fields []*cField + fn StructLevelFunc +} + +type cField struct { + idx int + cTags *cTag +} + +type cTag struct { + tag string + aliasTag string + actualAliasTag string + hasAlias bool + typeof tagType + hasTag bool + fn Func + keys *cTag + next *cTag + param string +} + +func (t *Transformer) extractStructCache(current reflect.Value) (*cStruct, error) { + t.cCache.lock.Lock() + defer t.cCache.lock.Unlock() + + typ := current.Type() + + // could have been multiple trying to access, but once first is done this ensures 
struct + // isn't parsed again. + cs, ok := t.cCache.Get(typ) + if ok { + return cs, nil + } + + cs = &cStruct{fields: make([]*cField, 0), fn: t.structLevelFuncs[typ]} + numFields := current.NumField() + + var ctag *cTag + var fld reflect.StructField + var tag string + var err error + + for i := 0; i < numFields; i++ { + + fld = typ.Field(i) + + if !fld.Anonymous && len(fld.PkgPath) > 0 { + continue + } + + tag = fld.Tag.Get(t.tagName) + if tag == ignoreTag { + continue + } + + // NOTE: cannot use shared tag cache, because tags may be equal, but things like alias may be different + // and so only struct level caching can be used instead of combined with Field tag caching + if len(tag) > 0 { + ctag, _, err = t.parseFieldTagsRecursive(tag, fld.Name, "", false) + if err != nil { + return nil, err + } + } else { + // even if field doesn't have validations need cTag for traversing to potential inner/nested + // elements of the field. + ctag = new(cTag) + } + + cs.fields = append(cs.fields, &cField{ + idx: i, + cTags: ctag, + }) + } + + t.cCache.Set(typ, cs) + + return cs, nil +} + +func (t *Transformer) parseFieldTagsRecursive(tag string, fieldName string, alias string, hasAlias bool) (firstCtag *cTag, current *cTag, err error) { + + var tg string + var ok bool + noAlias := len(alias) == 0 + tags := strings.Split(tag, tagSeparator) + + for i := 0; i < len(tags); i++ { + + tg = tags[i] + if noAlias { + alias = tg + } + + // check map for alias and process new tags, otherwise process as usual + if tagsVal, found := t.aliases[tg]; found { + if i == 0 { + firstCtag, current, err = t.parseFieldTagsRecursive(tagsVal, fieldName, tg, true) + if err != nil { + return + } + } else { + next, curr, errr := t.parseFieldTagsRecursive(tagsVal, fieldName, tg, true) + if errr != nil { + err = errr + return + } + current.next, current = next, curr + } + continue + } + + var prevTag tagType + + if i == 0 { + current = &cTag{aliasTag: alias, hasAlias: hasAlias, hasTag: true} + firstCtag = 
current + } else { + prevTag = current.typeof + current.next = &cTag{aliasTag: alias, hasAlias: hasAlias, hasTag: true} + current = current.next + } + + switch tg { + + case diveTag: + current.typeof = typeDive + continue + + case keysTag: + current.typeof = typeKeys + + if i == 0 || prevTag != typeDive { + err = ErrInvalidKeysTag + return + } + + current.typeof = typeKeys + + // need to pass along only keys tag + // need to increment i to skip over the keys tags + b := make([]byte, 0, 64) + + i++ + + for ; i < len(tags); i++ { + + b = append(b, tags[i]...) + b = append(b, ',') + + if tags[i] == endKeysTag { + break + } + } + + if current.keys, _, err = t.parseFieldTagsRecursive(string(b[:len(b)-1]), fieldName, "", false); err != nil { + return + } + continue + + case endKeysTag: + current.typeof = typeEndKeys + + // if there are more in tags then there was no keysTag defined + // and an error should be thrown + if i != len(tags)-1 { + err = ErrUndefinedKeysTag + } + return + + default: + + vals := strings.SplitN(tg, tagKeySeparator, 2) + + if noAlias { + alias = vals[0] + current.aliasTag = alias + } else { + current.actualAliasTag = tg + } + + current.tag = vals[0] + if len(current.tag) == 0 { + err = &ErrInvalidTag{tag: current.tag, field: fieldName} + return + } + + if current.fn, ok = t.transformations[current.tag]; !ok { + err = &ErrUndefinedTag{tag: current.tag, field: fieldName} + return + } + + if len(vals) > 1 { + current.param = strings.Replace(vals[1], utf8HexComma, ",", -1) + } + } + } + return +} diff --git a/vendor/gopkg.in/go-playground/mold.v2/errors.go b/vendor/gopkg.in/go-playground/mold.v2/errors.go new file mode 100644 index 0000000..d0bfb21 --- /dev/null +++ b/vendor/gopkg.in/go-playground/mold.v2/errors.go @@ -0,0 +1,71 @@ +package mold + +import ( + "errors" + "fmt" + "reflect" + "strings" +) + +var ( + // ErrInvalidDive describes an invalid dive tag configuration + ErrInvalidDive = errors.New("Invalid dive tag configuration") + + // 
ErrUndefinedKeysTag describes an undefined keys tag when an endkeys tag is defined
	ErrUndefinedKeysTag = errors.New("'" + endKeysTag + "' tag encountered without a corresponding '" + keysTag + "' tag")

	// ErrInvalidKeysTag describes a misuse of the keys tag
	ErrInvalidKeysTag = errors.New("'" + keysTag + "' tag must be immediately preceeded by the '" + diveTag + "' tag")
)

// ErrUndefinedTag defines a tag that does not exist
type ErrUndefinedTag struct {
	tag   string
	field string
}

// Error returns the UndefinedTag error text
func (e *ErrUndefinedTag) Error() string {
	return strings.TrimSpace(fmt.Sprintf("unregistered/undefined transformation '%s' found on field %s", e.tag, e.field))
}

// ErrInvalidTag defines a bad value for a tag being used
type ErrInvalidTag struct {
	tag   string
	field string
}

// Error returns the InvalidTag error text
func (e *ErrInvalidTag) Error() string {
	return fmt.Sprintf("invalid tag '%s' found on field %s", e.tag, e.field)
}

// An ErrInvalidTransformValue describes an invalid argument passed to Struct or Field.
// (The argument passed must be a non-nil pointer.)
+type ErrInvalidTransformValue struct { + typ reflect.Type + fn string +} + +func (e *ErrInvalidTransformValue) Error() string { + if e.typ == nil { + return fmt.Sprintf("mold: %s(nil)", e.fn) + } + + if e.typ.Kind() != reflect.Ptr { + return fmt.Sprintf("mold: %s(non-pointer %s)", e.fn, e.typ.String()) + } + + return fmt.Sprintf("mold: %s(nil %s)", e.fn, e.typ.String()) +} + +// ErrInvalidTransformation describes an invalid argument passed to +// `Struct` or `Field` +type ErrInvalidTransformation struct { + typ reflect.Type +} + +// Error returns ErrInvalidTransformation message +func (e *ErrInvalidTransformation) Error() string { + return "mold: (nil " + e.typ.String() + ")" +} diff --git a/vendor/gopkg.in/go-playground/mold.v2/modifiers/modifiers.go b/vendor/gopkg.in/go-playground/mold.v2/modifiers/modifiers.go new file mode 100644 index 0000000..7710651 --- /dev/null +++ b/vendor/gopkg.in/go-playground/mold.v2/modifiers/modifiers.go @@ -0,0 +1,18 @@ +package modifiers + +import "gopkg.in/go-playground/mold.v2" + +// New returns a modifier with defaults registered +func New() *mold.Transformer { + mod := mold.New() + mod.SetTagName("mod") + mod.Register("trim", TrimSpace) + mod.Register("ltrim", TrimLeft) + mod.Register("rtrim", TrimRight) + mod.Register("tprefix", TrimPrefix) + mod.Register("tsuffix", TrimSuffix) + mod.Register("lcase", ToLower) + mod.Register("ucase", ToUpper) + mod.Register("snake", SnakeCase) + return mod +} diff --git a/vendor/gopkg.in/go-playground/mold.v2/modifiers/string.go b/vendor/gopkg.in/go-playground/mold.v2/modifiers/string.go new file mode 100644 index 0000000..d1f1eff --- /dev/null +++ b/vendor/gopkg.in/go-playground/mold.v2/modifiers/string.go @@ -0,0 +1,95 @@ +package modifiers + +import ( + "context" + "reflect" + "strings" + + snakecase "github.com/segmentio/go-snakecase" + "gopkg.in/go-playground/mold.v2" +) + +// TrimSpace trims extra space from text +func TrimSpace(ctx context.Context, t *mold.Transformer, v reflect.Value, 
param string) error { + s, ok := v.Interface().(string) + if !ok { + return nil + } + v.SetString(strings.TrimSpace(s)) + return nil +} + +// TrimLeft trims extra left hand side of string using provided cutset +func TrimLeft(ctx context.Context, t *mold.Transformer, v reflect.Value, param string) error { + s, ok := v.Interface().(string) + if !ok { + return nil + } + v.SetString(strings.TrimLeft(s, param)) + return nil +} + +// TrimRight trims extra right hand side of string using provided cutset +func TrimRight(ctx context.Context, t *mold.Transformer, v reflect.Value, param string) error { + s, ok := v.Interface().(string) + if !ok { + return nil + } + v.SetString(strings.TrimRight(s, param)) + return nil +} + +// TrimPrefix trims the string of a prefix +func TrimPrefix(ctx context.Context, t *mold.Transformer, v reflect.Value, param string) error { + s, ok := v.Interface().(string) + if !ok { + return nil + } + v.SetString(strings.TrimPrefix(s, param)) + return nil +} + +// TrimSuffix trims the string of a suffix +func TrimSuffix(ctx context.Context, t *mold.Transformer, v reflect.Value, param string) error { + s, ok := v.Interface().(string) + if !ok { + return nil + } + v.SetString(strings.TrimSuffix(s, param)) + return nil +} + +// ToLower convert string to lower case +func ToLower(ctx context.Context, t *mold.Transformer, v reflect.Value, param string) error { + s, ok := v.Interface().(string) + if !ok { + return nil + } + v.SetString(strings.ToLower(s)) + return nil +} + +// ToUpper convert string to upper case +func ToUpper(ctx context.Context, t *mold.Transformer, v reflect.Value, param string) error { + s, ok := v.Interface().(string) + if !ok { + return nil + } + v.SetString(strings.ToUpper(s)) + return nil +} + +// SnakeCase converts string to snake case +func SnakeCase(ctx context.Context, t *mold.Transformer, v reflect.Value, param string) error { + s, ok := v.Interface().(string) + if !ok { + return nil + } + v.SetString(snakecase.Snakecase(s)) + 
return nil +} + +// TODO: Add more +// - Snake_Case - can be combined with lowercase +// - CamelCase +// - many more diff --git a/vendor/gopkg.in/go-playground/mold.v2/mold.go b/vendor/gopkg.in/go-playground/mold.v2/mold.go new file mode 100644 index 0000000..886d58e --- /dev/null +++ b/vendor/gopkg.in/go-playground/mold.v2/mold.go @@ -0,0 +1,312 @@ +package mold + +import ( + "context" + "fmt" + "reflect" + "strings" + "time" +) + +var ( + timeType = reflect.TypeOf(time.Time{}) + defaultCField = &cField{} + restrictedAliasErr = "Alias '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation" + restrictedTagErr = "Tag '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation" +) + +// TODO - ensure StructLevel and Func get passed an interface and not *Transform directly + +// Func defines a transform function for use. +type Func func(ctx context.Context, t *Transformer, value reflect.Value, param string) error + +// StructLevelFunc accepts all values needed for struct level validation +type StructLevelFunc func(ctx context.Context, t *Transformer, value reflect.Value) error + +// Transformer is the base controlling object which contains +// all necessary information +type Transformer struct { + tagName string + aliases map[string]string + transformations map[string]Func + structLevelFuncs map[reflect.Type]StructLevelFunc + cCache *structCache + tCache *tagCache +} + +// New creates a new Transform object with default tag name of 'mold' +func New() *Transformer { + tc := new(tagCache) + tc.m.Store(make(map[string]*cTag)) + + sc := new(structCache) + sc.m.Store(make(map[reflect.Type]*cStruct)) + + return &Transformer{ + tagName: "mold", + aliases: make(map[string]string), + transformations: make(map[string]Func), + cCache: sc, + tCache: tc, + } +} + +// SetTagName sets the given tag name to be used. 
// Default is "mold"
func (t *Transformer) SetTagName(tagName string) {
	t.tagName = tagName
}

// Register adds a transformation with the given tag
//
// NOTES:
// - if the key already exists, the previous transformation function will be replaced.
// - this method is not thread-safe; it is intended that these all be registered beforehand
func (t *Transformer) Register(tag string, fn Func) {
	if len(tag) == 0 {
		panic("Function Key cannot be empty")
	}

	if fn == nil {
		panic("Function cannot be empty")
	}

	_, ok := restrictedTags[tag]

	if ok || strings.ContainsAny(tag, restrictedTagChars) {
		panic(fmt.Sprintf(restrictedTagErr, tag))
	}

	t.transformations[tag] = fn
}

// RegisterAlias registers a mapping of a single transform tag that
// defines a common or complex set of transformations to simplify adding transforms
// to structs.
//
// NOTE: this function is not thread-safe; it is intended that these all be registered beforehand
func (t *Transformer) RegisterAlias(alias, tags string) {
	if len(alias) == 0 {
		panic("Alias cannot be empty")
	}

	if len(tags) == 0 {
		panic("Aliased tags cannot be empty")
	}

	_, ok := restrictedTags[alias]

	if ok || strings.ContainsAny(alias, restrictedTagChars) {
		panic(fmt.Sprintf(restrictedAliasErr, alias))
	}
	t.aliases[alias] = tags
}

// RegisterStructLevel registers a StructLevelFunc against a number of types.
// Why does this exist? For structs for which you may not have access or rights to add tags to,
// from other packages you're using.
+// +// NOTES: +// - this method is not thread-safe it is intended that these all be registered prior to any validation +func (t *Transformer) RegisterStructLevel(fn StructLevelFunc, types ...interface{}) { + if t.structLevelFuncs == nil { + t.structLevelFuncs = make(map[reflect.Type]StructLevelFunc) + } + + for _, typ := range types { + t.structLevelFuncs[reflect.TypeOf(typ)] = fn + } +} + +// Struct applies transformations against the provided struct +func (t *Transformer) Struct(ctx context.Context, v interface{}) error { + val := reflect.ValueOf(v) + + if val.Kind() != reflect.Ptr || val.IsNil() { + return &ErrInvalidTransformValue{typ: reflect.TypeOf(v), fn: "Struct"} + } + + val = val.Elem() + typ := val.Type() + + if val.Kind() != reflect.Struct || val.Type() == timeType { + return &ErrInvalidTransformation{typ: reflect.TypeOf(v)} + } + + return t.setByStruct(ctx, val, typ, nil) +} + +func (t *Transformer) setByStruct(ctx context.Context, current reflect.Value, typ reflect.Type, ct *cTag) (err error) { + cs, ok := t.cCache.Get(typ) + if !ok { + if cs, err = t.extractStructCache(current); err != nil { + return + } + } + + // run is struct has a corresponding struct level transformation + if cs.fn != nil { + if err = cs.fn(ctx, t, current); err != nil { + return + } + } + + var f *cField + + for i := 0; i < len(cs.fields); i++ { + f = cs.fields[i] + if err = t.setByField(ctx, current.Field(f.idx), f, f.cTags); err != nil { + return + } + } + return nil +} + +// Field applies the provided transformations against the variable +func (t *Transformer) Field(ctx context.Context, v interface{}, tags string) (err error) { + if len(tags) == 0 || tags == ignoreTag { + return nil + } + + val := reflect.ValueOf(v) + + if val.Kind() != reflect.Ptr || val.IsNil() { + return &ErrInvalidTransformValue{typ: reflect.TypeOf(v), fn: "Field"} + } + val = val.Elem() + + // find cached tag + ctag, ok := t.tCache.Get(tags) + if !ok { + t.tCache.lock.Lock() + + // could have been 
multiple trying to access, but once first is done this ensures tag + // isn't parsed again. + ctag, ok = t.tCache.Get(tags) + if !ok { + if ctag, _, err = t.parseFieldTagsRecursive(tags, "", "", false); err != nil { + t.tCache.lock.Unlock() + return + } + t.tCache.Set(tags, ctag) + } + t.tCache.lock.Unlock() + } + err = t.setByField(ctx, val, defaultCField, ctag) + return +} + +func (t *Transformer) setByField(ctx context.Context, orig reflect.Value, cf *cField, ct *cTag) (err error) { + current, kind := extractType(orig) + + if ct.hasTag { + for { + if ct == nil { + break + } + + switch ct.typeof { + case typeEndKeys: + return + case typeDive: + ct = ct.next + + switch kind { + case reflect.Slice, reflect.Array: + reusableCF := &cField{} + + for i := 0; i < current.Len(); i++ { + if err = t.setByField(ctx, current.Index(i), reusableCF, ct); err != nil { + return + } + } + + case reflect.Map: + reusableCF := &cField{} + + hasKeys := ct != nil && ct.typeof == typeKeys && ct.keys != nil + + for _, key := range current.MapKeys() { + newVal := reflect.New(current.Type().Elem()).Elem() + newVal.Set(current.MapIndex(key)) + + if hasKeys { + + // remove current map key as we may be changing it + // and re-add to the map afterwards + current.SetMapIndex(key, reflect.Value{}) + + newKey := reflect.New(current.Type().Key()).Elem() + newKey.Set(key) + key = newKey + + // handle map key + if err = t.setByField(ctx, key, reusableCF, ct.keys); err != nil { + return + } + + // can be nil when just keys being validated + if ct.next != nil { + if err = t.setByField(ctx, newVal, reusableCF, ct.next); err != nil { + return + } + } + } else { + if err = t.setByField(ctx, newVal, reusableCF, ct); err != nil { + return + } + } + current.SetMapIndex(key, newVal) + } + + default: + err = ErrInvalidDive + } + return + + default: + if !current.CanAddr() { + newVal := reflect.New(current.Type()).Elem() + newVal.Set(current) + if err = ct.fn(ctx, t, newVal, ct.param); err != nil { + return + 
} + orig.Set(newVal) + } else { + if err = ct.fn(ctx, t, current, ct.param); err != nil { + return + } + } + ct = ct.next + } + } + } + + // need to do this again because one of the previous + // sets could have set a struct value, where it was a + // nil pointer before + current, kind = extractType(current) + + if kind == reflect.Struct { + typ := current.Type() + if typ == timeType { + return + } + if ct != nil { + ct = ct.next + } + + if !current.CanAddr() { + newVal := reflect.New(typ).Elem() + newVal.Set(current) + + if err = t.setByStruct(ctx, newVal, typ, ct); err != nil { + return + } + orig.Set(newVal) + return + } + err = t.setByStruct(ctx, current, typ, ct) + } + return +} diff --git a/vendor/gopkg.in/go-playground/mold.v2/restricted.go b/vendor/gopkg.in/go-playground/mold.v2/restricted.go new file mode 100644 index 0000000..d68c043 --- /dev/null +++ b/vendor/gopkg.in/go-playground/mold.v2/restricted.go @@ -0,0 +1,19 @@ +package mold + +const ( + diveTag = "dive" + restrictedTagChars = ".[],|=+()`~!@#$%^&*\\\"/?<>{}" + tagSeparator = "," + ignoreTag = "-" + tagKeySeparator = "=" + utf8HexComma = "0x2C" + keysTag = "keys" + endKeysTag = "endkeys" +) + +var ( + restrictedTags = map[string]struct{}{ + diveTag: {}, + ignoreTag: {}, + } +) diff --git a/vendor/gopkg.in/go-playground/mold.v2/util.go b/vendor/gopkg.in/go-playground/mold.v2/util.go new file mode 100644 index 0000000..92d243a --- /dev/null +++ b/vendor/gopkg.in/go-playground/mold.v2/util.go @@ -0,0 +1,35 @@ +package mold + +import ( + "reflect" +) + +// extractType gets the actual underlying type of field value. 
func extractType(current reflect.Value) (reflect.Value, reflect.Kind) {
	switch current.Kind() {
	case reflect.Ptr:
		if current.IsNil() {
			return current, reflect.Ptr
		}
		return extractType(current.Elem())

	case reflect.Interface:
		if current.IsNil() {
			return current, reflect.Interface
		}
		return extractType(current.Elem())

	default:
		return current, current.Kind()
	}
}

// HasValue reports whether field holds a non-zero value: non-nil for slices,
// maps, pointers, interfaces, channels and funcs; otherwise different from the
// type's zero value.
func HasValue(field reflect.Value) bool {
	switch field.Kind() {
	case reflect.Slice, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func:
		return !field.IsNil()
	default:
		return field.IsValid() && field.Interface() != reflect.Zero(field.Type()).Interface()
	}
}
diff --git a/vendor/gopkg.in/validator.v2/LICENSE b/vendor/gopkg.in/validator.v2/LICENSE
new file mode 100644
index 0000000..ad410e1
--- /dev/null
+++ b/vendor/gopkg.in/validator.v2/LICENSE
@@ -0,0 +1,201 @@
+Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
\ No newline at end of file
diff --git a/vendor/gopkg.in/validator.v2/README.md b/vendor/gopkg.in/validator.v2/README.md
new file mode 100644
index 0000000..5bc5d8f
--- /dev/null
+++ b/vendor/gopkg.in/validator.v2/README.md
@@ -0,0 +1,188 @@
+Package validator
+================
+
+Package validator implements variable validations
+
+Installation
+============
+
+Just use go get.
+
+```bash
+go get gopkg.in/validator.v2
+```
+
+And then just import the package into your own code.
+
+```go
+import (
+	"gopkg.in/validator.v2"
+)
+```
+
+Usage
+=====
+
+Please see http://godoc.org/gopkg.in/validator.v2 for detailed usage docs.
+A simple example would be:
+
+```go
+type NewUserRequest struct {
+	Username string `validate:"min=3,max=40,regexp=^[a-zA-Z]*$"`
+	Name string `validate:"nonzero"`
+	Age int `validate:"min=21"`
+	Password string `validate:"min=8"`
+}
+
+nur := NewUserRequest{Username: "something", Age: 20}
+if errs := validator.Validate(nur); errs != nil {
+	// values not valid, deal with errors here
+}
+```
+
+Builtin validators
+
+Here is the list of validators built in to the package. Built-in validators
+will check the element pointed to if the value to check is a pointer.
+The `nil` pointer is treated as a valid value by built-in validators other
+than `nonzero`, so you should use `nonzero` if you don't want to
+accept a `nil` pointer.
+ +``` +len + For numeric numbers, len will simply make sure that the + value is equal to the parameter given. For strings, it + checks that the string length is exactly that number of + characters. For slices, arrays, and maps, validates the + number of items. (Usage: len=10) + +max + For numeric numbers, max will simply make sure that the + value is lesser or equal to the parameter given. For strings, + it checks that the string length is at most that number of + characters. For slices, arrays, and maps, validates the + number of items. (Usage: max=10) + +min + For numeric numbers, min will simply make sure that the value + is greater or equal to the parameter given. For strings, it + checks that the string length is at least that number of + characters. For slices, arrays, and maps, validates the + number of items. (Usage: min=10) + +nonzero + This validates that the value is not zero. The appropriate + zero value is given by the Go spec (e.g. for int it's 0, for + string it's "", for pointers is nil, etc.) For structs, it + will not check to see if the struct itself has all zero + values, instead use a pointer or put nonzero on the struct's + keys that you care about. (Usage: nonzero) + +regexp + Only valid for string types, it will validator that the + value matches the regular expression provided as parameter. + (Usage: regexp=^a.*b$) +``` + +Custom validators + +It is possible to define custom validators by using SetValidationFunc. +First, one needs to create a validation function. + +```go +// Very simple validator +func notZZ(v interface{}, param string) error { + st := reflect.ValueOf(v) + if st.Kind() != reflect.String { + return errors.New("notZZ only validates strings") + } + if st.String() == "ZZ" { + return errors.New("value cannot be ZZ") + } + return nil +} +``` + +Then one needs to add it to the list of validators and give it a "tag" +name. 
+ +```go +validator.SetValidationFunc("notzz", notZZ) +``` + +Then it is possible to use the notzz validation tag. This will print +"Field A error: value cannot be ZZ" + +```go +type T struct { + A string `validate:"nonzero,notzz"` +} +t := T{"ZZ"} +if errs := validator.Validate(t); errs != nil { + fmt.Printf("Field A error: %s\n", errs["A"][0]) +} +``` + +You can also have multiple sets of validator rules with SetTag(). + +```go +type T struct { + A int `foo:"nonzero" bar:"min=10"` +} +t := T{5} +SetTag("foo") +validator.Validate(t) // valid as it's nonzero +SetTag("bar") +validator.Validate(t) // invalid as it's less than 10 +``` + +SetTag is probably better used with multiple validators. + +```go +fooValidator := validator.NewValidator() +fooValidator.SetTag("foo") +barValidator := validator.NewValidator() +barValidator.SetTag("bar") +fooValidator.Validate(t) +barValidator.Validate(t) +``` + +This keeps the default validator's tag clean. Again, please refer to +godocs for a lot of more examples and different uses. + +Pull requests policy +==================== + +tl;dr. Contributions are welcome. + +The repository is organized in version branches. Pull requests to, say, the +`v2` branch that break API compatibility will not be accepted. It is okay to +break the API in master, *not in the branches*. + +As for validation functions, the preference is to keep the main code simple +and add most new functions to the validator-contrib repository. + +https://github.com/go-validator/validator-contrib + +For improvements and/or fixes to the builtin validation functions, please +make sure the behaviour will not break existing functionality in the branches. +If you see a case where the functionality of the builtin will change +significantly, please send a pull request against `master`. We can discuss then +whether the changes should be incorporated in the version branches as well. 
+ +License +======= + +Copyright 2014 Roberto Teixeira + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/gopkg.in/validator.v2/builtins.go b/vendor/gopkg.in/validator.v2/builtins.go new file mode 100644 index 0000000..4368684 --- /dev/null +++ b/vendor/gopkg.in/validator.v2/builtins.go @@ -0,0 +1,271 @@ +// Package validator implements value validations +// +// Copyright 2014 Roberto Teixeira +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validator + +import ( + "reflect" + "regexp" + "strconv" +) + +// nonzero tests whether a variable value non-zero +// as defined by the golang spec. 
+func nonzero(v interface{}, param string) error { + st := reflect.ValueOf(v) + valid := true + switch st.Kind() { + case reflect.String: + valid = len(st.String()) != 0 + case reflect.Ptr, reflect.Interface: + valid = !st.IsNil() + case reflect.Slice, reflect.Map, reflect.Array: + valid = st.Len() != 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + valid = st.Int() != 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + valid = st.Uint() != 0 + case reflect.Float32, reflect.Float64: + valid = st.Float() != 0 + case reflect.Bool: + valid = st.Bool() + case reflect.Invalid: + valid = false // always invalid + case reflect.Struct: + valid = true // always valid since only nil pointers are empty + default: + return ErrUnsupported + } + + if !valid { + return ErrZeroValue + } + return nil +} + +// length tests whether a variable's length is equal to a given +// value. For strings it tests the number of characters whereas +// for maps and slices it tests the number of items. 
+func length(v interface{}, param string) error { + st := reflect.ValueOf(v) + valid := true + if st.Kind() == reflect.Ptr { + if st.IsNil() { + return nil + } + st = st.Elem() + } + switch st.Kind() { + case reflect.String: + p, err := asInt(param) + if err != nil { + return ErrBadParameter + } + valid = int64(len(st.String())) == p + case reflect.Slice, reflect.Map, reflect.Array: + p, err := asInt(param) + if err != nil { + return ErrBadParameter + } + valid = int64(st.Len()) == p + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p, err := asInt(param) + if err != nil { + return ErrBadParameter + } + valid = st.Int() == p + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p, err := asUint(param) + if err != nil { + return ErrBadParameter + } + valid = st.Uint() == p + case reflect.Float32, reflect.Float64: + p, err := asFloat(param) + if err != nil { + return ErrBadParameter + } + valid = st.Float() == p + default: + return ErrUnsupported + } + if !valid { + return ErrLen + } + return nil +} + +// min tests whether a variable value is larger or equal to a given +// number. For number types, it's a simple lesser-than test; for +// strings it tests the number of characters whereas for maps +// and slices it tests the number of items. 
+func min(v interface{}, param string) error { + st := reflect.ValueOf(v) + invalid := false + if st.Kind() == reflect.Ptr { + if st.IsNil() { + return nil + } + st = st.Elem() + } + switch st.Kind() { + case reflect.String: + p, err := asInt(param) + if err != nil { + return ErrBadParameter + } + invalid = int64(len(st.String())) < p + case reflect.Slice, reflect.Map, reflect.Array: + p, err := asInt(param) + if err != nil { + return ErrBadParameter + } + invalid = int64(st.Len()) < p + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p, err := asInt(param) + if err != nil { + return ErrBadParameter + } + invalid = st.Int() < p + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p, err := asUint(param) + if err != nil { + return ErrBadParameter + } + invalid = st.Uint() < p + case reflect.Float32, reflect.Float64: + p, err := asFloat(param) + if err != nil { + return ErrBadParameter + } + invalid = st.Float() < p + default: + return ErrUnsupported + } + if invalid { + return ErrMin + } + return nil +} + +// max tests whether a variable value is lesser than a given +// value. For numbers, it's a simple lesser-than test; for +// strings it tests the number of characters whereas for maps +// and slices it tests the number of items. 
+func max(v interface{}, param string) error {
+	st := reflect.ValueOf(v)
+	var invalid bool
+	if st.Kind() == reflect.Ptr {
+		if st.IsNil() {
+			return nil
+		}
+		st = st.Elem()
+	}
+	switch st.Kind() {
+	case reflect.String:
+		p, err := asInt(param)
+		if err != nil {
+			return ErrBadParameter
+		}
+		invalid = int64(len(st.String())) > p
+	case reflect.Slice, reflect.Map, reflect.Array:
+		p, err := asInt(param)
+		if err != nil {
+			return ErrBadParameter
+		}
+		invalid = int64(st.Len()) > p
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		p, err := asInt(param)
+		if err != nil {
+			return ErrBadParameter
+		}
+		invalid = st.Int() > p
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		p, err := asUint(param)
+		if err != nil {
+			return ErrBadParameter
+		}
+		invalid = st.Uint() > p
+	case reflect.Float32, reflect.Float64:
+		p, err := asFloat(param)
+		if err != nil {
+			return ErrBadParameter
+		}
+		invalid = st.Float() > p
+	default:
+		return ErrUnsupported
+	}
+	if invalid {
+		return ErrMax
+	}
+	return nil
+}
+
+// regex is the builtin validation function that checks
+// whether the string variable matches a regular expression
+func regex(v interface{}, param string) error {
+	s, ok := v.(string)
+	if !ok {
+		sptr, ok := v.(*string)
+		if !ok {
+			return ErrUnsupported
+		}
+		if sptr == nil {
+			return nil
+		}
+		s = *sptr
+	}
+
+	re, err := regexp.Compile(param)
+	if err != nil {
+		return ErrBadParameter
+	}
+
+	if !re.MatchString(s) {
+		return ErrRegexp
+	}
+	return nil
+}
+
+// asInt returns the parameter as an int64
+// or an error if it can't convert
+func asInt(param string) (int64, error) {
+	i, err := strconv.ParseInt(param, 0, 64)
+	if err != nil {
+		return 0, ErrBadParameter
+	}
+	return i, nil
+}
+
+// asUint returns the parameter as a uint64
+// or an error if it can't convert
+func asUint(param string) (uint64, error) {
+	i, err := strconv.ParseUint(param, 0, 64)
+	if err != nil {
+		return 0, ErrBadParameter
+	}
+	return i, nil
+}
+
+// asFloat returns the parameter as a float64
+// or an error if it can't convert
+func asFloat(param string) (float64, error) {
+	i, err := strconv.ParseFloat(param, 64)
+	if err != nil {
+		return 0.0, ErrBadParameter
+	}
+	return i, nil
+}
diff --git a/vendor/gopkg.in/validator.v2/doc.go b/vendor/gopkg.in/validator.v2/doc.go
new file mode 100644
index 0000000..5dde50d
--- /dev/null
+++ b/vendor/gopkg.in/validator.v2/doc.go
@@ -0,0 +1,265 @@
+// Package validator implements value validations
+//
+// Copyright 2014 Roberto Teixeira
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package validator implements value validations based on struct tags.
+
+In code it is often necessary to validate that a given value is valid before
+using it for something. A typical example might be something like this.
+
+	if age < 18 {
+		return errors.New("age cannot be under 18")
+	}
+
+This is a simple enough example, but it can get significantly more complex,
+especially when dealing with structs.
+
+	l := len(strings.Trim(s.Username))
+	if l < 3 || l > 40 || !regexp.MatchString("^[a-zA-Z]$", s.Username) || s.Age < 18 || s.Password {
+		return errors.New("Invalid request")
+	}
+
+You get the idea. Package validator allows one to define valid values as
+struct tags when defining a new struct type.
+ + type NewUserRequest struct { + Username string `validate:"min=3,max=40,regexp=^[a-zA-Z]*$"` + Name string `validate:"nonzero"` + Age int `validate:"min=18"` + Password string `validate:"min=8"` + } + +Then validating a variable of type NewUserRequest becomes trivial. + + nur := NewUserRequest{Username: "something", ...} + if errs := validator.Validate(nur); errs != nil { + // do something + } + +Builtin validator functions + +Here is the list of validator functions builtin in the package. + + len + For numeric numbers, len will simply make sure that the value is + equal to the parameter given. For strings, it checks that + the string length is exactly that number of characters. For slices, + arrays, and maps, validates the number of items. (Usage: len=10) + + max + For numeric numbers, max will simply make sure that the value is + lesser or equal to the parameter given. For strings, it checks that + the string length is at most that number of characters. For slices, + arrays, and maps, validates the number of items. (Usage: max=10) + + min + For numeric numbers, min will simply make sure that the value is + greater or equal to the parameter given. For strings, it checks that + the string length is at least that number of characters. For slices, + arrays, and maps, validates the number of items. (Usage: min=10) + + nonzero + This validates that the value is not zero. The appropriate zero value + is given by the Go spec (e.g. for int it's 0, for string it's "", for + pointers is nil, etc.) Usage: nonzero + + regexp + Only valid for string types, it will validate that the value matches + the regular expression provided as parameter. (Usage: regexp=^a.*b$) + + +Note that there are no tests to prevent conflicting validator parameters. For +instance, these fields will never be valid. + + ... + A int `validate:"max=0,min=1"` + B string `validate:"len=10,regexp=^$" + ... 
+ +Custom validation functions + +It is possible to define custom validation functions by using SetValidationFunc. +First, one needs to create a validation function. + + // Very simple validation func + func notZZ(v interface{}, param string) error { + st := reflect.ValueOf(v) + if st.Kind() != reflect.String { + return validate.ErrUnsupported + } + if st.String() == "ZZ" { + return errors.New("value cannot be ZZ") + } + return nil + } + +Then one needs to add it to the list of validation funcs and give it a "tag" name. + + validate.SetValidationFunc("notzz", notZZ) + +Then it is possible to use the notzz validation tag. This will print +"Field A error: value cannot be ZZ" + + type T struct { + A string `validate:"nonzero,notzz"` + } + t := T{"ZZ"} + if errs := validator.Validate(t); errs != nil { + fmt.Printf("Field A error: %s\n", errs["A"][0]) + } + +To use parameters, it is very similar. + + // Very simple validator with parameter + func notSomething(v interface{}, param string) error { + st := reflect.ValueOf(v) + if st.Kind() != reflect.String { + return validate.ErrUnsupported + } + if st.String() == param { + return errors.New("value cannot be " + param) + } + return nil + } + +And then the code below should print "Field A error: value cannot be ABC". + + validator.SetValidationFunc("notsomething", notSomething) + type T struct { + A string `validate:"notsomething=ABC"` + } + t := T{"ABC"} + if errs := validator.Validate(t); errs != nil { + fmt.Printf("Field A error: %s\n", errs["A"][0]) + } + +As well, it is possible to overwrite builtin validation functions. + + validate.SetValidationFunc("min", myMinFunc) + +And you can delete a validation function by setting it to nil. + + validate.SetValidationFunc("notzz", nil) + validate.SetValidationFunc("nonzero", nil) + +Using a non-existing validation func in a field tag will always return +false and with error validate.ErrUnknownTag. 
+ +Finally, package validator also provides a helper function that can be used +to validate simple variables/values. + + // errs: nil + errs = validator.Valid(42, "min=10, max=50") + + // errs: [validate.ErrZeroValue] + errs = validator.Valid(nil, "nonzero") + + // errs: [validate.ErrMin,validate.ErrMax] + errs = validator.Valid("hi", "nonzero,min=3,max=2") + +Custom tag name + +In case there is a reason why one would not wish to use tag 'validate' (maybe due to +a conflict with a different package), it is possible to tell the package to use +a different tag. + + validator.SetTag("valid") + +Then. + + Type T struct { + A int `valid:"min=8, max=10"` + B string `valid:"nonzero"` + } + +SetTag is permanent. The new tag name will be used until it is again changed +with a new call to SetTag. A way to temporarily use a different tag exists. + + validator.WithTag("foo").Validate(t) + validator.WithTag("bar").Validate(t) + // But this will go back to using 'validate' + validator.Validate(t) + +Multiple validators + +You may often need to have a different set of validation +rules for different situations. In all the examples above, +we only used the default validator but you could create a +new one and set specific rules for it. + +For instance, you might use the same struct to decode incoming JSON for a REST API +but your needs will change when you're using it to, say, create a new instance +in storage vs. when you need to change something. + + type User struct { + Username string `validate:"nonzero"` + Name string `validate:"nonzero"` + Age int `validate:"nonzero"` + Password string `validate:"nonzero"` + } + +Maybe when creating a new user, you need to make sure all values in the struct are filled, +but then you use the same struct to handle incoming requests to, say, change the password, +in which case you only need the Username and the Password and don't care for the others. +You might use two different validators. 
+ + type User struct { + Username string `creating:"nonzero" chgpw:"nonzero"` + Name string `creating:"nonzero"` + Age int `creating:"nonzero"` + Password string `creating:"nonzero" chgpw:"nonzero"` + } + + var ( + creationValidator = validator.NewValidator() + chgPwValidator = validator.NewValidator() + ) + + func init() { + creationValidator.SetTag("creating") + chgPwValidator.SetTag("chgpw") + } + + ... + + func CreateUserHandler(w http.ResponseWriter, r *http.Request) { + var u User + json.NewDecoder(r.Body).Decode(&user) + if errs := creationValidator.Validate(user); errs != nil { + // the request did not include all of the User + // struct fields, so send a http.StatusBadRequest + // back or something + } + // create the new user + } + + func SetNewUserPasswordHandler(w http.ResponseWriter, r *http.Request) { + var u User + json.NewDecoder(r.Body).Decode(&user) + if errs := chgPwValidator.Validate(user); errs != nil { + // the request did not Username and Password, + // so send a http.StatusBadRequest + // back or something + } + // save the new password + } + +It is also possible to do all of that using only the default validator as long +as SetTag is always called before calling validator.Validate() or you chain the +with WithTag(). + +*/ +package validator diff --git a/vendor/gopkg.in/validator.v2/validator.go b/vendor/gopkg.in/validator.v2/validator.go new file mode 100644 index 0000000..a23f3ee --- /dev/null +++ b/vendor/gopkg.in/validator.v2/validator.go @@ -0,0 +1,369 @@ +// Package validator implements value validations +// +// Copyright 2014 Roberto Teixeira +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validator
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"regexp"
+	"strings"
+	"unicode"
+)
+
+// TextErr is an error that also implements the TextMarshaler interface for
+// serializing out to various plain text encodings. Packages creating their
+// own custom errors should use TextErr if they're intending to use serializing
+// formats like json, msgpack etc.
+type TextErr struct {
+	Err error
+}
+
+// Error implements the error interface.
+func (t TextErr) Error() string {
+	return t.Err.Error()
+}
+
+// MarshalText implements the TextMarshaler
+func (t TextErr) MarshalText() ([]byte, error) {
+	return []byte(t.Err.Error()), nil
+}
+
+var (
+	// ErrZeroValue is the error returned when variable has zero value
+	// and nonzero was specified
+	ErrZeroValue = TextErr{errors.New("zero value")}
+	// ErrMin is the error returned when variable is less than minimum
+	// value specified
+	ErrMin = TextErr{errors.New("less than min")}
+	// ErrMax is the error returned when variable is more than
+	// maximum specified
+	ErrMax = TextErr{errors.New("greater than max")}
+	// ErrLen is the error returned when length is not equal to
+	// param specified
+	ErrLen = TextErr{errors.New("invalid length")}
+	// ErrRegexp is the error returned when the value does not
+	// match the provided regular expression parameter
+	ErrRegexp = TextErr{errors.New("regular expression mismatch")}
+	// ErrUnsupported is the error returned when a validation rule
+	// is used with an unsupported variable type
+	ErrUnsupported = TextErr{errors.New("unsupported 
type")} + // ErrBadParameter is the error returned when an invalid parameter + // is provided to a validation rule (e.g. a string where an int was + // expected (max=foo,len=bar) or missing a parameter when one is required (len=)) + ErrBadParameter = TextErr{errors.New("bad parameter")} + // ErrUnknownTag is the error returned when an unknown tag is found + ErrUnknownTag = TextErr{errors.New("unknown tag")} + // ErrInvalid is the error returned when variable is invalid + // (normally a nil pointer) + ErrInvalid = TextErr{errors.New("invalid value")} +) + +// ErrorMap is a map which contains all errors from validating a struct. +type ErrorMap map[string]ErrorArray + +// ErrorMap implements the Error interface so we can check error against nil. +// The returned error is if existent the first error which was added to the map. +func (err ErrorMap) Error() string { + for k, errs := range err { + if len(errs) > 0 { + return fmt.Sprintf("%s: %s", k, errs.Error()) + } + } + + return "" +} + +// ErrorArray is a slice of errors returned by the Validate function. +type ErrorArray []error + +// ErrorArray implements the Error interface and returns the first error as +// string if existent. +func (err ErrorArray) Error() string { + if len(err) > 0 { + return err[0].Error() + } + return "" +} + +// ValidationFunc is a function that receives the value of a +// field and a parameter used for the respective validation tag. +type ValidationFunc func(v interface{}, param string) error + +// Validator implements a validator +type Validator struct { + // Tag name being used. + tagName string + // validationFuncs is a map of ValidationFuncs indexed + // by their name. 
+ validationFuncs map[string]ValidationFunc +} + +// Helper validator so users can use the +// functions directly from the package +var defaultValidator = NewValidator() + +// NewValidator creates a new Validator +func NewValidator() *Validator { + return &Validator{ + tagName: "validate", + validationFuncs: map[string]ValidationFunc{ + "nonzero": nonzero, + "len": length, + "min": min, + "max": max, + "regexp": regex, + }, + } +} + +// SetTag allows you to change the tag name used in structs +func SetTag(tag string) { + defaultValidator.SetTag(tag) +} + +// SetTag allows you to change the tag name used in structs +func (mv *Validator) SetTag(tag string) { + mv.tagName = tag +} + +// WithTag creates a new Validator with the new tag name. It is +// useful to chain-call with Validate so we don't change the tag +// name permanently: validator.WithTag("foo").Validate(t) +func WithTag(tag string) *Validator { + return defaultValidator.WithTag(tag) +} + +// WithTag creates a new Validator with the new tag name. It is +// useful to chain-call with Validate so we don't change the tag +// name permanently: validator.WithTag("foo").Validate(t) +func (mv *Validator) WithTag(tag string) *Validator { + v := mv.copy() + v.SetTag(tag) + return v +} + +// Copy a validator +func (mv *Validator) copy() *Validator { + newFuncs := map[string]ValidationFunc{} + for k, f := range mv.validationFuncs { + newFuncs[k] = f + } + return &Validator{ + tagName: mv.tagName, + validationFuncs: newFuncs, + } +} + +// SetValidationFunc sets the function to be used for a given +// validation constraint. Calling this function with nil vf +// is the same as removing the constraint function from the list. +func SetValidationFunc(name string, vf ValidationFunc) error { + return defaultValidator.SetValidationFunc(name, vf) +} + +// SetValidationFunc sets the function to be used for a given +// validation constraint. 
Calling this function with nil vf +// is the same as removing the constraint function from the list. +func (mv *Validator) SetValidationFunc(name string, vf ValidationFunc) error { + if name == "" { + return errors.New("name cannot be empty") + } + if vf == nil { + delete(mv.validationFuncs, name) + return nil + } + mv.validationFuncs[name] = vf + return nil +} + +// Validate validates the fields of a struct based +// on 'validator' tags and returns errors found indexed +// by the field name. +func Validate(v interface{}) error { + return defaultValidator.Validate(v) +} + +// Validate validates the fields of a struct based +// on 'validator' tags and returns errors found indexed +// by the field name. +func (mv *Validator) Validate(v interface{}) error { + sv := reflect.ValueOf(v) + st := reflect.TypeOf(v) + if sv.Kind() == reflect.Ptr && !sv.IsNil() { + return mv.Validate(sv.Elem().Interface()) + } + if sv.Kind() != reflect.Struct && sv.Kind() != reflect.Interface { + return ErrUnsupported + } + + nfields := sv.NumField() + m := make(ErrorMap) + for i := 0; i < nfields; i++ { + fname := st.Field(i).Name + if !unicode.IsUpper(rune(fname[0])) { + continue + } + + f := sv.Field(i) + // deal with pointers + for f.Kind() == reflect.Ptr && !f.IsNil() { + f = f.Elem() + } + tag := st.Field(i).Tag.Get(mv.tagName) + if tag == "-" { + continue + } + var errs ErrorArray + + if tag != "" { + err := mv.Valid(f.Interface(), tag) + if errors, ok := err.(ErrorArray); ok { + errs = errors + } else { + if err != nil { + errs = ErrorArray{err} + } + } + } + + mv.deepValidateCollection(f, fname, m) // no-op if field is not a struct, interface, array, slice or map + + if len(errs) > 0 { + m[st.Field(i).Name] = errs + } + } + + if len(m) > 0 { + return m + } + return nil +} + +func (mv *Validator) deepValidateCollection(f reflect.Value, fname string, m ErrorMap) { + switch f.Kind() { + case reflect.Struct, reflect.Interface, reflect.Ptr: + e := mv.Validate(f.Interface()) + if e, ok := 
e.(ErrorMap); ok && len(e) > 0 { + for j, k := range e { + m[fname+"."+j] = k + } + } + case reflect.Array, reflect.Slice: + for i := 0; i < f.Len(); i++ { + mv.deepValidateCollection(f.Index(i), fmt.Sprintf("%s[%d]", fname, i), m) + } + case reflect.Map: + for _, key := range f.MapKeys() { + mv.deepValidateCollection(key, fmt.Sprintf("%s[%+v](key)", fname, key.Interface()), m) // validate the map key + value := f.MapIndex(key) + mv.deepValidateCollection(value, fmt.Sprintf("%s[%+v](value)", fname, key.Interface()), m) + } + } +} + +// Valid validates a value based on the provided +// tags and returns errors found or nil. +func Valid(val interface{}, tags string) error { + return defaultValidator.Valid(val, tags) +} + +// Valid validates a value based on the provided +// tags and returns errors found or nil. +func (mv *Validator) Valid(val interface{}, tags string) error { + if tags == "-" { + return nil + } + v := reflect.ValueOf(val) + if v.Kind() == reflect.Ptr && !v.IsNil() { + return mv.Valid(v.Elem().Interface(), tags) + } + var err error + switch v.Kind() { + case reflect.Invalid: + err = mv.validateVar(nil, tags) + default: + err = mv.validateVar(val, tags) + } + return err +} + +// validateVar validates one single variable +func (mv *Validator) validateVar(v interface{}, tag string) error { + tags, err := mv.parseTags(tag) + if err != nil { + // unknown tag found, give up. 
+ return err + } + errs := make(ErrorArray, 0, len(tags)) + for _, t := range tags { + if err := t.Fn(v, t.Param); err != nil { + errs = append(errs, err) + } + } + if len(errs) > 0 { + return errs + } + return nil +} + +// tag represents one of the tag items +type tag struct { + Name string // name of the tag + Fn ValidationFunc // validation function to call + Param string // parameter to send to the validation function +} + +// separate by no escaped commas +var sepPattern *regexp.Regexp = regexp.MustCompile(`((?:^|[^\\])(?:\\\\)*),`) + +func splitUnescapedComma(str string) []string { + ret := []string{} + indexes := sepPattern.FindAllStringIndex(str, -1) + last := 0 + for _, is := range indexes { + ret = append(ret, str[last:is[1]-1]) + last = is[1] + } + ret = append(ret, str[last:]) + return ret +} + +// parseTags parses all individual tags found within a struct tag. +func (mv *Validator) parseTags(t string) ([]tag, error) { + tl := splitUnescapedComma(t) + tags := make([]tag, 0, len(tl)) + for _, i := range tl { + i = strings.Replace(i, `\,`, ",", -1) + tg := tag{} + v := strings.SplitN(i, "=", 2) + tg.Name = strings.Trim(v[0], " ") + if tg.Name == "" { + return []tag{}, ErrUnknownTag + } + if len(v) > 1 { + tg.Param = strings.Trim(v[1], " ") + } + var found bool + if tg.Fn, found = mv.validationFuncs[tg.Name]; !found { + return []tag{}, ErrUnknownTag + } + tags = append(tags, tg) + + } + return tags, nil +} diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE new file mode 100644 index 0000000..8dada3e --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml new file mode 100644 index 0000000..8da58fb --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/gopkg.in/yaml.v2/NOTICE new file mode 100644 index 0000000..866d74a --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md new file mode 100644 index 0000000..b50c6e8 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/README.md @@ -0,0 +1,133 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. + +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. 
+ + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go new file mode 100644 index 0000000..1f7e87e --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/apic.go @@ -0,0 +1,739 @@ +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. 
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. 
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. 
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. 
+// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +///* +// * Create ALIAS. 
+// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. 
+func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. 
+// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. 
+// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. 
+// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go new file mode 100644 index 0000000..e4e56e2 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/decode.go @@ -0,0 +1,775 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + // For an alias node, alias holds the resolved alias. 
+ alias *node + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. + +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *node + doneInit bool +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + if len(b) == 0 { + b = []byte{'\n'} + } + yaml_parser_set_input_string(&p.parser, b) + return &p +} + +func newParserFromReader(r io.Reader) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + yaml_parser_set_input_reader(&p.parser, r) + return &p +} + +func (p *parser) init() { + if p.doneInit { + return + } + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + } + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. 
+func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + return p.event.typ +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *node, anchor []byte) { + if anchor != nil { + p.doc.anchors[string(anchor)] = n + } +} + +func (p *parser) parse() *node { + p.init() + switch p.peek() { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. 
+ return nil + default: + panic("attempted to parse unknown event: " + p.event.typ.String()) + } +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + n.children = append(n.children, p.parse()) + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + n.alias = p.doc.anchors[n.value] + if n.alias == nil { + failf("unknown anchor '%s' referenced", n.value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. 
+ +type decoder struct { + doc *node + aliases map[*node]bool + mapType reflect.Type + terrors []string + strict bool +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder(strict bool) *decoder { + d := &decoder{mapType: defaultMapType, strict: strict} + d.aliases = make(map[*node]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. 
+func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
+ failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n] = true + good = d.unmarshal(n.alias, out) + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == yaml_BINARY_TAG { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. 
+ text = []byte(n.value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == yaml_BINARY_TAG { + out.SetString(resolved.(string)) + return true + } + if resolved != nil { + out.SetString(n.value) + return true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else if tag == yaml_TIMESTAMP_TAG { + // It looks like a timestamp but for backward compatibility + // reasons we set it as a string, so that code that unmarshals + // timestamp-like values into interface{} will continue to + // see a string and not a time.Time. + // TODO(v3) Drop this. + out.Set(reflect.ValueOf(n.value)) + } else { + out.Set(reflect.ValueOf(resolved)) + } + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch resolved := resolved.(type) { + case int: + if !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + 
return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + if out.Type().Elem() == reflect.TypeOf(resolved) { + // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? + elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + return true + } + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + d.setMapIndex(n.children[i+1], out, k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { + if d.strict && out.MapIndex(k) != zeroValue { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) 
+ return + } + out.SetMapIndex(k, v) +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + var doneFields []bool + if d.strict { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + if d.strict { + if doneFields[info.Id] { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := 
reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + d.setMapIndex(n.children[i+1], inlineMap, name, value) + } else if d.strict { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + an, ok := d.doc.anchors[n.value] + if ok && an.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + an, ok := d.doc.anchors[ni.value] + if ok && an.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go new file mode 100644 index 0000000..a1c2cc5 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/emitterc.go @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. 
+func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. 
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. 
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. 
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return 
yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. 
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if 
yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. 
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. 
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. 
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. 
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. 
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. 
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. 
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. +func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { 
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. 
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' 
{ + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. 
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if 
i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. 
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', 
')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < 
len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = 
put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if 
!is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if 
!yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go new file mode 100644 index 0000000..0ee738e --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/encode.go @@ -0,0 +1,390 @@ +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// jsonNumber is the interface of the encoding/json.Number datatype. +// Repeating the interface here avoids a dependency on encoding/json, and also +// supports other libraries like jsoniter, which use a similar datatype with +// the same interface. Detecting this interface is useful when dealing with +// structures containing json.Number, which is a string under the hood. The +// encoder should prefer the use of Int64(), Float64() and string(), in that +// order, when encoding this type. +type jsonNumber interface { + Float64() (float64, error) + Int64() (int64, error) + String() string +} + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + // doneInit holds whether the initial stream_start_event has been + // emitted. 
+ doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch m := iface.(type) { + case jsonNumber: + integer, err := m.Int64() + if err == nil { + // In this case the json.Number is a valid int64 + in = reflect.ValueOf(integer) + break + } + float, err := m.Float64() + if err == nil { + // In this case the json.Number is a valid float64 + in = reflect.ValueOf(float) + break + } + // fallback case - no number could be obtained + in = reflect.ValueOf(m.String()) + case time.Time, *time.Time: + // Although time.Time implements 
TextMarshaler, + // we don't want to treat it as a string for YAML + // purposes because YAML has special support for + // timestamps. + case Marshaler: + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + case encoding.TextMarshaler: + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.Type() == ptrTimeType { + e.timev(tag, in.Elem()) + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + if in.Type() == timeType { + e.timev(tag, in) + } else { + e.structv(tag, in) + } + case reflect.Slice, reflect.Array: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e 
*encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) + e.emit() + f() + yaml_mapping_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. 
+func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = yaml_BINARY_TAG + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. 
+ switch { + case strings.Contains(s, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go new file mode 100644 index 0000000..81d05df --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/parserc.go @@ -0,0 +1,1095 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START 
implicit_document? explicit_document* STREAM-END
// implicit_document    ::= block_node DOCUMENT-END*
// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
// block_node_or_indentless_sequence ::=
//                          ALIAS
//                          | properties (block_content | indentless_block_sequence)?
//                          | block_content
//                          | indentless_block_sequence
// block_node           ::= ALIAS
//                          | properties block_content?
//                          | block_content
// flow_node            ::= ALIAS
//                          | properties flow_content?
//                          | flow_content
// properties           ::= TAG ANCHOR? | ANCHOR TAG?
// block_content        ::= block_collection | flow_collection | SCALAR
// flow_content         ::= flow_collection | SCALAR
// block_collection     ::= block_sequence | block_mapping
// flow_collection      ::= flow_sequence | flow_mapping
// block_sequence       ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
// indentless_sequence  ::= (BLOCK-ENTRY block_node?)+
// block_mapping        ::= BLOCK-MAPPING_START
//                          ((KEY block_node_or_indentless_sequence?)?
//                          (VALUE block_node_or_indentless_sequence?)?)*
//                          BLOCK-END
// flow_sequence        ::= FLOW-SEQUENCE-START
//                          (flow_sequence_entry FLOW-ENTRY)*
//                          flow_sequence_entry?
//                          FLOW-SEQUENCE-END
// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
// flow_mapping         ::= FLOW-MAPPING-START
//                          (flow_mapping_entry FLOW-ENTRY)*
//                          flow_mapping_entry?
//                          FLOW-MAPPING-END
// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?

// Peek the next token in the token queue without consuming it.
// Returns nil when the scanner cannot produce another token; in that case
// the failure details have already been recorded on the parser by the
// scanner, so callers simply propagate false.
func peek_token(parser *yaml_parser_t) *yaml_token_t {
	if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
		return &parser.tokens[parser.tokens_head]
	}
	return nil
}

// Remove the next token from the queue (must be called after peek_token).
func skip_token(parser *yaml_parser_t) {
	parser.token_available = false
	parser.tokens_parsed++
	// Record whether the token just consumed was STREAM-END; once it is,
	// yaml_parser_parse becomes a no-op for the rest of the stream.
	parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
	parser.tokens_head++
}

// Get the next event.
// Returns true on success and false on a parse error. After the stream has
// ended, or after a previous error, this returns true with *event left as
// the zero value.
func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
	// Erase the event object.
	*event = yaml_event_t{}

	// No events after the end of the stream or error.
	if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
		return true
	}

	// Generate the next event.
	return yaml_parser_state_machine(parser, event)
}

// Set parser error (without context) and return false, so callers can
// `return yaml_parser_set_parser_error(...)` directly from a failure path.
func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
	parser.error = yaml_PARSER_ERROR
	parser.problem = problem
	parser.problem_mark = problem_mark
	return false
}

// Like yaml_parser_set_parser_error, but also records a context description
// and mark (e.g. "while parsing a block mapping" at the mapping's start).
// Always returns false.
func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
	parser.error = yaml_PARSER_ERROR
	parser.context = context
	parser.context_mark = context_mark
	parser.problem = problem
	parser.problem_mark = problem_mark
	return false
}

// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case 
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
		return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)

	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
		return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)

	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
		return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)

	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
		return yaml_parser_parse_flow_mapping_key(parser, event, true)

	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
		return yaml_parser_parse_flow_mapping_key(parser, event, false)

	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
		return yaml_parser_parse_flow_mapping_value(parser, event, false)

	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
		return yaml_parser_parse_flow_mapping_value(parser, event, true)

	default:
		panic("invalid parser state")
	}
}

// Parse the production:
// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
//            ************
//
// Emits a STREAM-START event for the mandatory first token, or sets a
// parser error if the stream does not begin with STREAM-START.
func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
	token := peek_token(parser)
	if token == nil {
		// Scanner failure; the error is already recorded on the parser.
		return false
	}
	if token.typ != yaml_STREAM_START_TOKEN {
		// NOTE(review): the token name "<stream-start>" had been stripped
		// from this message (markup-unaware text processing swallowed the
		// angle-bracketed word); restored to the upstream libyaml wording.
		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
	}
	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
	*event = yaml_event_t{
		typ:        yaml_STREAM_START_EVENT,
		start_mark: token.start_mark,
		end_mark:   token.end_mark,
		encoding:   token.encoding,
	}
	skip_token(parser)
	return true
}

// Parse the productions:
// implicit_document    ::= block_node DOCUMENT-END*
//                          *
// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
//                          *************************
func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {

	token := peek_token(parser)
	if token == nil {
		return false
	}

	// Parse extra document end indicators.
+ if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document. + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected ", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end. + parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else 
{ + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) + break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. 
+ end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, 
start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == 
yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? +// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = 
parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
//                          *
//
// On the first call the FLOW-SEQUENCE-START token is consumed and its mark
// pushed; on later calls a ',' separator is expected before the next entry.
// A KEY token inside a flow sequence starts an implicit single-pair mapping.
func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
	if first {
		// Consume the FLOW-SEQUENCE-START token and remember where the
		// collection began, for error reporting.
		// NOTE(review): no nil check on token here (upstream does the same);
		// presumably the preceding FLOW-SEQUENCE-START guarantees a token —
		// confirm against the scanner.
		token := peek_token(parser)
		parser.marks = append(parser.marks, token.start_mark)
		skip_token(parser)
	}
	token := peek_token(parser)
	if token == nil {
		return false
	}
	if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
		if !first {
			if token.typ == yaml_FLOW_ENTRY_TOKEN {
				// Consume the ',' separating entries.
				skip_token(parser)
				token = peek_token(parser)
				if token == nil {
					return false
				}
			} else {
				// Entries must be separated by ',' or terminated by ']'.
				context_mark := parser.marks[len(parser.marks)-1]
				parser.marks = parser.marks[:len(parser.marks)-1]
				return yaml_parser_set_parser_error_context(parser,
					"while parsing a flow sequence", context_mark,
					"did not find expected ',' or ']'", token.start_mark)
			}
		}

		if token.typ == yaml_KEY_TOKEN {
			// "? key: value" inside a flow sequence: emit an implicit
			// flow-mapping start and switch to the entry-mapping states.
			parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
			*event = yaml_event_t{
				typ:        yaml_MAPPING_START_EVENT,
				start_mark: token.start_mark,
				end_mark:   token.end_mark,
				implicit:   true,
				style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
			}
			skip_token(parser)
			return true
		} else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
			// Ordinary entry: parse a flow node and come back here after.
			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
			return yaml_parser_parse_node(parser, event, false, false)
		}
	}

	// ']' reached: pop the saved state and mark, emit SEQUENCE-END.
	parser.state = parser.states[len(parser.states)-1]
	parser.states = parser.states[:len(parser.states)-1]
	parser.marks = parser.marks[:len(parser.marks)-1]

	*event = yaml_event_t{
		typ:        yaml_SEQUENCE_END_EVENT,
		start_mark: token.start_mark,
		end_mark:   token.end_mark,
	}

	skip_token(parser)
	return true
}

//
// Parse the productions:
// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? 
+ } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = 
parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. +func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. 
+func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. 
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go new file mode 100644 index 0000000..7c1f5fa --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/readerc.go @@ -0,0 +1,412 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. +const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. 
+ buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. 
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. + if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. 
+ inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. 
+ for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. 
+ if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. 
+ parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. 
+ for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go new file mode 100644 index 0000000..6c151db --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/resolve.go @@ -0,0 +1,258 @@ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. 
+ if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + case yaml_FLOAT_TAG: + if rtag == yaml_INT_TAG { + switch v := out.(type) { + case int64: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + case int: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. 
+ if tag == "" || tag == yaml_TIMESTAMP_TAG { + t, ok := parseTimestamp(in) + if ok { + return yaml_TIMESTAMP_TAG, t + } + } + + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + } + default: + panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") + } + } + return yaml_STR_TAG, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. 
+func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. +} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. +// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. 
+ i := 0 + for ; i < len(s); i++ { + if c := s[i]; c < '0' || c > '9' { + break + } + } + if i != 4 || i == len(s) || s[i] != '-' { + return time.Time{}, false + } + for _, format := range allowedTimestampFormats { + if t, err := time.Parse(format, s); err == nil { + return t, true + } + } + return time.Time{}, false +} diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go new file mode 100644 index 0000000..077fd1d --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/scannerc.go @@ -0,0 +1,2696 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in +// some cases we are less restrictive that it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided on two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// parser transform the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descendant parser (or, +// LL(1) parser, as it is usually called). +// +// Actually there are two issues of Scanning that might be called "clever", the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in details. +// +// Here the Scanning step is explained and implemented. We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. +// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. +// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. 
+// DOCUMENT-START # '---' +// DOCUMENT-END # '...' +// BLOCK-SEQUENCE-START # Indentation increase denoting a block +// BLOCK-MAPPING-START # sequence or a block mapping. +// BLOCK-END # Indentation decrease. +// FLOW-SEQUENCE-START # '[' +// FLOW-SEQUENCE-END # ']' +// BLOCK-SEQUENCE-START # '{' +// BLOCK-SEQUENCE-END # '}' +// BLOCK-ENTRY # '-' +// FLOW-ENTRY # ',' +// KEY # '?' or nothing (simple keys). +// VALUE # ':' +// ALIAS(anchor) # '*anchor' +// ANCHOR(anchor) # '&anchor' +// TAG(handle,suffix) # '!handle!suffix' +// SCALAR(value,style) # A scalar. +// +// The following two tokens are "virtual" tokens denoting the beginning and the +// end of the stream: +// +// STREAM-START(encoding) +// STREAM-END +// +// We pass the information about the input stream encoding with the +// STREAM-START token. +// +// The next two tokens are responsible for tags: +// +// VERSION-DIRECTIVE(major,minor) +// TAG-DIRECTIVE(handle,prefix) +// +// Example: +// +// %YAML 1.1 +// %TAG ! !foo +// %TAG !yaml! tag:yaml.org,2002: +// --- +// +// The correspoding sequence of tokens: +// +// STREAM-START(utf-8) +// VERSION-DIRECTIVE(1,1) +// TAG-DIRECTIVE("!","!foo") +// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") +// DOCUMENT-START +// STREAM-END +// +// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole +// line. +// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... 
+// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. 
We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). 
However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. 
If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
+// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. 
// Appends a normalized '\n' (or a verbatim LS/PS) for the break at the
// current position and advances position, mark, and unread count.
// Returns s unchanged if the current character is not a break.
func read_line(parser *yaml_parser_t, s []byte) []byte {
	buf := parser.buffer
	pos := parser.buffer_pos
	switch {
	case buf[pos] == '\r' && buf[pos+1] == '\n':
		// CR LF . LF
		s = append(s, '\n')
		parser.buffer_pos += 2
		// CR LF is two input characters: bump index/unread here for the
		// second one; the shared block below accounts for the first.
		parser.mark.index++
		parser.unread--
	case buf[pos] == '\r' || buf[pos] == '\n':
		// CR|LF . LF
		s = append(s, '\n')
		parser.buffer_pos += 1
	case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
		// NEL . LF  (U+0085: one character, two UTF-8 bytes)
		s = append(s, '\n')
		parser.buffer_pos += 2
	case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
		// LS|PS . LS|PS  (U+2028/U+2029 are kept verbatim, not folded to LF)
		s = append(s, buf[parser.buffer_pos:pos+3]...)
		parser.buffer_pos += 3
	default:
		// Not positioned on a break.
		return s
	}
	// Shared bookkeeping for every break: one character consumed, the
	// column resets, the line number advances.
	parser.mark.index++
	parser.mark.column = 0
	parser.mark.line++
	parser.unread--
	return s
}

// Get the next token.
// Pops one token from the queue, fetching more from the scanner when the
// queue is exhausted. Note that after STREAM-END or once an error has been
// recorded this returns true with *token left zeroed — callers are expected
// to stop asking.
func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
	// Erase the token object.
	*token = yaml_token_t{} // [Go] Is this necessary?

	// No tokens after STREAM-END or error.
	if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
		return true
	}

	// Ensure that the tokens queue contains enough tokens.
	if !parser.token_available {
		if !yaml_parser_fetch_more_tokens(parser) {
			return false
		}
	}

	// Fetch the next token from the queue.
	*token = parser.tokens[parser.tokens_head]
	parser.tokens_head++
	parser.tokens_parsed++
	parser.token_available = false

	// Remember that the stream is done so subsequent calls short-circuit.
	if token.typ == yaml_STREAM_END_TOKEN {
		parser.stream_end_produced = true
	}
	return true
}

// Set the scanner error and return false.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // Check if we really need to fetch more tokens. + need_more_tokens := false + + if parser.tokens_head == len(parser.tokens) { + // Queue is empty. + need_more_tokens = true + } else { + // Check if any potential simple key may occupy the head position. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + if simple_key.possible && simple_key.token_number == parser.tokens_parsed { + need_more_tokens = true + break + } + } + } + + // We are finished. + if !need_more_tokens { + break + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Remove obsolete potential simple keys. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? 
+ if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? 
+ if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' 
|| parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +// Check the list of potential simple keys and remove the positions that +// cannot contain simple keys anymore. +func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { + // Check for a potential simple key for each flow level. + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + + // The specification requires that a simple key + // + // - is limited to a single line, + // - is shorter than 1024 characters. + if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { + + // Check if the potential simple key to be removed is required. + if simple_key.required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + } + } + return true +} + +// Check if a simple key may start at the current position and add it if +// needed. 
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + } + simple_key.mark = parser.mark + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. + if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + return true +} + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // Increase the flow level. + parser.flow_level++ + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] + } + return true +} + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. 
In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + + // Create a token and insert it into the queue. + token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + // Loop through the indentation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. 
+ parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. 
+ start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. 
+ yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. 
+ token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if simple_key.possible { + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. 
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. 
+ if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. 
+ var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! 
tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. 
+ if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. 
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. 
+ if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' 
{ + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' 
|| + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. 
+ octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. 
+ increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. 
+ var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) 
+ } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. 
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. + leading_blanks := false + for !is_blankz(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // Is is an escaped single quote. + s = append(s, '\'') + skip(parser) + skip(parser) + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. 
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + skip(parser) + skip_line(parser) + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. + switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + skip(parser) + skip(parser) + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. 
+ if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !is_hex(parser.buffer, parser.buffer_pos+k) { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "did not find expected hexdecimal number") + return false + } + value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. + if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. 
+ if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. 
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? 
+ if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go new file mode 100644 index 0000000..4c45e66 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/sorter.go @@ -0,0 +1,113 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. 
+func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go new file mode 100644 index 0000000..a2dde60 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/writerc.go @@ -0,0 +1,26 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. 
+ if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go new file mode 100644 index 0000000..de85aa4 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yaml.go @@ -0,0 +1,466 @@ +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/go-yaml/yaml +// +package yaml + +import ( + "errors" + "fmt" + "io" + "reflect" + "strings" + "sync" +) + +// MapSlice encodes and decodes as a YAML map. +// The order of keys is preserved when encoding and decoding. +type MapSlice []MapItem + +// MapItem is an item in a MapSlice. +type MapItem struct { + Key, Value interface{} +} + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. The UnmarshalYAML +// method receives a function that may be called to unmarshal the original +// YAML value into a field or variable. It is safe to call the unmarshal +// function parameter more than once if necessary. +type Unmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. +type Marshaler interface { + MarshalYAML() (interface{}, error) +} + +// Unmarshal decodes the first document found within the in byte slice +// and assigns decoded values into the out value. 
+// +// Maps and pointers (to a struct, string, int, etc) are accepted as out +// values. If an internal pointer within a struct is not initialized, +// the yaml package will initialize it if necessary for unmarshalling +// the provided data. The out parameter must not be nil. +// +// The type of the decoded values should be compatible with the respective +// values in out. If one or more values cannot be decoded due to a type +// mismatches, decoding continues partially until the end of the YAML +// content, and a *yaml.TypeError is returned with details for all +// missed values. +// +// Struct fields are only unmarshalled if they are exported (have an +// upper case first letter), and are unmarshalled using the field name +// lowercased as the default key. Custom keys may be defined via the +// "yaml" name in the field tag: the content preceding the first comma +// is used as the key, and the following comma-separated options are +// used to tweak the marshalling process (see Marshal). +// Conflicting names result in a runtime error. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// +// See the documentation of Marshal for the format of tags and a list of +// supported tag options. +// +func Unmarshal(in []byte, out interface{}) (err error) { + return unmarshal(in, out, false) +} + +// UnmarshalStrict is like Unmarshal except that any fields that are found +// in the data that do not have corresponding struct members, or mapping +// keys that are duplicates, will result in +// an error. +func UnmarshalStrict(in []byte, out interface{}) (err error) { + return unmarshal(in, out, true) +} + +// A Decorder reads and decodes YAML values from an input stream. +type Decoder struct { + strict bool + parser *parser +} + +// NewDecoder returns a new decoder that reads from r. 
+// +// The decoder introduces its own buffering and may read +// data from r beyond the YAML values requested. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + parser: newParserFromReader(r), + } +} + +// SetStrict sets whether strict decoding behaviour is enabled when +// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict. +func (dec *Decoder) SetStrict(strict bool) { + dec.strict = strict +} + +// Decode reads the next YAML-encoded value from its input +// and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. +func (dec *Decoder) Decode(v interface{}) (err error) { + d := newDecoder(dec.strict) + defer handleErr(&err) + node := dec.parser.parse() + if node == nil { + return io.EOF + } + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(node, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +func unmarshal(in []byte, out interface{}, strict bool) (err error) { + defer handleErr(&err) + d := newDecoder(strict) + p := newParser(in) + defer p.destroy() + node := p.parse() + if node != nil { + v := reflect.ValueOf(out) + if v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + d.unmarshal(node, v) + } + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Marshal serializes the value provided into a YAML document. The structure +// of the generated document will reflect the structure of the value itself. +// Maps and pointers (to struct, string, int, etc) are accepted as the in value. +// +// Struct fields are only marshalled if they are exported (have an upper case +// first letter), and are marshalled using the field name lowercased as the +// default key. 
Custom keys may be defined via the "yaml" name in the field +// tag: the content preceding the first comma is used as the key, and the +// following comma-separated options are used to tweak the marshalling process. +// Conflicting names result in a runtime error. +// +// The field tag format accepted is: +// +// `(...) yaml:"[][,[,]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be included if that method returns true. +// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. +// +// In addition, if the key is "-", the field is ignored. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" +// +func Marshal(in interface{}) (out []byte, err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(in)) + e.finish() + out = e.out + return +} + +// An Encoder writes YAML values to an output stream. +type Encoder struct { + encoder *encoder +} + +// NewEncoder returns a new encoder that writes to w. +// The Encoder should be closed after use to flush all data +// to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + encoder: newEncoderWithWriter(w), + } +} + +// Encode writes the YAML encoding of v to the stream. 
+// If multiple items are encoded to the stream, the +// second and subsequent document will be preceded +// with a "---" document separator, but the first will not. +// +// See the documentation for Marshal for details about the conversion of Go +// values to YAML. +func (e *Encoder) Encode(v interface{}) (err error) { + defer handleErr(&err) + e.encoder.marshalDoc("", reflect.ValueOf(v)) + return nil +} + +// Close closes the encoder by writing any remaining data. +// It does not write a stream terminating string "...". +func (e *Encoder) Close() (err error) { + defer handleErr(&err) + e.encoder.finish() + return nil +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// A TypeError is returned by Unmarshal when one or more fields in +// the YAML document cannot be properly decoded into the requested +// types. When this error is returned, the value is still +// unmarshaled partially. +type TypeError struct { + Errors []string +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. 
+ InlineMap int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. + Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in 
struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. 
+type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go new file mode 100644 index 0000000..e25cee5 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yamlh.go @@ -0,0 +1,738 @@ +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. 
+const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. +} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota + + yaml_PLAIN_SCALAR_STYLE // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. 
+ yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. + yaml_STREAM_END_TOKEN // A STREAM-END token. + + yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. + yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. + yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. + yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. + + yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. + yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. + yaml_BLOCK_END_TOKEN // A BLOCK-END token. + + yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. + yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. + yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. + yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + yaml_KEY_TOKEN // A KEY token. + yaml_VALUE_TOKEN // A VALUE token. + + yaml_ALIAS_TOKEN // An ALIAS token. + yaml_ANCHOR_TOKEN // An ANCHOR token. + yaml_TAG_TOKEN // A TAG token. + yaml_SCALAR_TOKEN // A SCALAR token. 
+) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). 
+ value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. +) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). 
+ version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. 
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. 
+ version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. 
+ yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. + yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. + yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. + yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. + yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. + yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. + yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. + yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. + yaml_PARSE_END_STATE // Expect nothing. 
+) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return 
"yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. 
+ tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. 
+ yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. +type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_writer io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? 
+ line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. + multiline bool // Does the scalar contain line breaks? + flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? + block_plain_allowed bool // Can the scalar be expressed in the block plain style? + single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? + block_allowed bool // Can the scalar be expressed in the literal or folded styles? + style yaml_scalar_style_t // The output style. + } + + // Dumper stuff + + opened bool // If the stream was already opened? + closed bool // If the stream was already closed? + + // The information associated with the document nodes. 
+ anchors *struct { + references int // The number of references. + anchor int // The anchor id. + serialized bool // If the node has been emitted? + } + + last_anchor_id int // The last assigned anchor id. + + document *yaml_document_t // The currently emitted document. +} diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go new file mode 100644 index 0000000..8110ce3 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yamlprivateh.go @@ -0,0 +1,173 @@ +package yaml + +const ( + // The size of the input raw buffer. + input_raw_buffer_size = 512 + + // The size of the input buffer. + // It should be possible to decode the whole raw buffer. + input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. + output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. 
+func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. 
+func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. 
+ if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +}